code
stringlengths
1
1.49M
vector
listlengths
0
7.38k
snippet
listlengths
0
7.38k
if False: # set to True to insert test data store(store.product.id > 0).delete() store(store.category.id > 0).delete() if len(store(store.product.id > 0).select()) == 0: fantasy_id = store.category.insert(name='Fantasy', description='Fantasy books', small_image='testdata/hp1.jpg') hp1 = store.product.insert(name="Harry Potter and the Sorcerer's Stone", category=fantasy_id, price=7.91, small_image='testdata/hp1.jpg') hp2 = store.product.insert(name="Harry Potter and the Chamber of Secrets", category=fantasy_id, price=8.91, small_image='testdata/hp2.jpg') hp3 = store.product.insert(name="Harry Potter and the Prisoner of Azkaban", category=fantasy_id, price=8.91, small_image='testdata/hp3.jpg') hp4 = store.product.insert(name="Harry Potter and the Goblet of Fire", category=fantasy_id, price=9.91, small_image='testdata/hp4.jpg') hp5 = store.product.insert(name="Harry Potter and the Order of the Phoenix", category=fantasy_id, price=9.91, small_image='testdata/hp5.jpg') hp6 = store.product.insert(name="Harry Potter and the Half-Blood Prince", category=fantasy_id, price=9.91, small_image='testdata/hp6.jpg') store.option.insert(product=hp1, description='Bookmark', price=1.5) store.option.insert(product=hp1, description='Wizard hat', price=12) for p2 in (hp2, hp3, hp4, hp5, hp6): store.cross_sell.insert(p1=hp1, p2=p2) hp1_hard = store.product.insert(name="Harry Potter and the Sorcerer's Stone [hardcover]", category=fantasy_id, price=15.91, small_image='testdata/hp1.jpg') store.up_sell.insert(product=hp1, better=hp1_hard)
[ [ 4, 0, 0.525, 1, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 19 ], [ 8, 1, 0.1, 0.05, 1, 0.89, 0, 266, 3, 0, 0, 0, 0, 0, 2 ], [ 8, 1, 0.15, 0.05, 1, 0.89, 0.5, 266, ...
[ "if False: # set to True to insert test data \n store(store.product.id > 0).delete()\n store(store.category.id > 0).delete()\n if len(store(store.product.id > 0).select()) == 0:\n fantasy_id = store.category.insert(name='Fantasy', description='Fantasy books', small_image='testdata/hp1.jpg')\n ...
UNDEFINED = -1 if request.env.web2py_runtime_gae: # if running on Google App Engine store = DAL('gae') # connect to Google BigTable session.connect(request, response, db=store) # and store sessions and tickets there else: store = DAL("sqlite://store.db") store.define_table('category', Field('name'), Field('description', 'text'), Field('small_image', 'upload'), ) store.define_table('product', Field('name'), Field('category', store.category), Field('description', 'text', default=''), Field('small_image', 'upload'), Field('large_image', 'upload', default=''), Field('quantity_in_stock', 'integer', default=UNDEFINED), # if UNDEFINED, don't show Field('max_quantity', 'integer', default=0), # maximum quantity that can be purchased in an order. If 0, no limit. If UNDEFINED, don't show. Field('price', 'double', default=1.0), Field('old_price', 'double', default=0.0), Field('weight_in_pounds', 'double', default=1), Field('tax_rate_in_your_state', 'double', default=10.0), Field('tax_rate_outside_your_state', 'double', default=0.0), Field('featured', 'boolean', default=False), Field('allow_rating', 'boolean', default=False), Field('rating', 'integer', default='0'), Field('viewed', 'integer', default='0'), Field('clicked', 'integer', default='0')) # each product can have optional addons store.define_table('option', Field('product', store.product), Field('description'), Field('price', 'double', default=1.0), ) # support for merchandising # for p1 show p2, and for p2 show p1 store.define_table('cross_sell', Field('p1', store.product), Field('p2', store.product), ) # for product, show better, but not the reverse store.define_table('up_sell', Field('product', store.product), Field('better', store.product), ) store.define_table('comment', Field('product', store.product), Field('author'), Field('email'), Field('body', 'text'), Field('rate', 'integer') ) store.define_table('info', Field('google_merchant_id', default='[google checkout id]', length=256), Field('name', default='[store 
name]'), Field('headline', default='[store headline]'), Field('address', default='[store address]'), Field('city', default='[store city]'), Field('state', default='[store state]'), Field('zip_code', default='[store zip]'), Field('phone', default='[store phone number]'), Field('fax', default='[store fax number]'), Field('email', requires=IS_EMAIL(), default='yourname@yourdomain.com'), Field('description', 'text', default='[about your store]'), Field('why_buy', 'text', default='[why buy at your store]'), Field('return_policy', 'text', default='[what is your return policy]'), Field('logo', 'upload', default=''), Field('color_background', length=10, default='white'), Field('color_foreground', length=10, default='black'), Field('color_header', length=10, default='#F6F6F6'), Field('color_link', length=10, default='#385ea2'), Field('font_family', length=32, default='arial, helvetica'), Field('ship_usps_express_mail', 'boolean', default=True), Field('ship_usps_express_mail_fc', 'double', default=0), Field('ship_usps_express_mail_vc', 'double', default=0), Field('ship_usps_express_mail_bc', 'double', default=0), Field('ship_usps_priority_mail', 'boolean', default=True), Field('ship_usps_priority_mail_fc', 'double', default=0), Field('ship_usps_priority_mail_vc', 'double', default=0), Field('ship_usps_priority_mail_bc', 'double', default=0), Field('ship_ups_next_day_air', 'boolean', default=True), Field('ship_ups_next_day_air_fc', 'double', default=0), Field('ship_ups_next_day_air_vc', 'double', default=0), Field('ship_ups_next_day_air_bc', 'double', default=0), Field('ship_ups_second_day_air', 'boolean', default=True), Field('ship_ups_second_day_air_fc', 'double', default=0), Field('ship_ups_second_day_air_vc', 'double', default=0), Field('ship_ups_second_day_air_bc', 'double', default=0), Field('ship_ups_ground', 'boolean', default=True), Field('ship_ups_ground_fc', 'double', default=0), Field('ship_ups_ground_vc', 'double', default=0), Field('ship_ups_ground_bc', 
'double', default=0), Field('ship_fedex_priority_overnight', 'boolean', default=True), Field('ship_fedex_priority_overnight_fc', 'double', default=0), Field('ship_fedex_priority_overnight_vc', 'double', default=0), Field('ship_fedex_priority_overnight_bc', 'double', default=0), Field('ship_fedex_second_day', 'boolean', default=True), Field('ship_fedex_second_day_fc', 'double', default=0), Field('ship_fedex_second_day_vc', 'double', default=0), Field('ship_fedex_second_day_bc', 'double', default=0), Field('ship_fedex_ground', 'boolean', default=True), Field('ship_fedex_ground_fc', 'double', default=0), Field('ship_fedex_ground_vc', 'double', default=0), Field('ship_fedex_ground_bc', 'double', default=0) ) store.category.name.requires = IS_NOT_IN_DB(store, 'category.name') store.product.name.requires = IS_NOT_IN_DB(store, 'product.name') store.product.category.requires = IS_IN_DB(store, 'category.id', 'category.name') store.product.name.requires = IS_NOT_EMPTY() store.product.description.requires = IS_NOT_EMPTY() store.product.quantity_in_stock.requires = IS_INT_IN_RANGE(0, 1000) store.product.price.requires = IS_FLOAT_IN_RANGE(0, 10000) store.product.rating.requires = IS_INT_IN_RANGE(-10000, 10000) store.product.viewed.requires = IS_INT_IN_RANGE(0, 1000000) store.product.clicked.requires = IS_INT_IN_RANGE(0, 1000000) store.option.product.requires = IS_IN_DB(store, 'product.id', 'product.name') store.cross_sell.p1.requires = IS_IN_DB(store, 'product.id', 'product.name') store.cross_sell.p2.requires = IS_IN_DB(store, 'product.id', 'product.name') store.up_sell.product.requires = IS_IN_DB(store, 'product.id', 'product.name') store.up_sell.better.requires = IS_IN_DB(store, 'product.id', 'product.name') store.comment.product.requires = IS_IN_DB(store, 'product.id', 'product.name') store.comment.author.requires = IS_NOT_EMPTY() store.comment.email.requires = IS_EMAIL() store.comment.body.requires = IS_NOT_EMPTY() store.comment.rate.requires = IS_IN_SET(range(5, 0, -1)) 
for field in store.info.fields: if field[:-2] in ['fc', 'vc']: store.info[field].requires = IS_FLOAT_IN_RANGE(0, 100) if len(store(store.info.id > 0).select()) == 0: store.info.insert(name='[store name]') mystore = store(store.info.id > 0).select()[0]
[ [ 14, 0, 0.0065, 0.0065, 0, 0.66, 0, 67, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 0, 0.039, 0.0325, 0, 0.66, 0.0323, 0, 7, 0, 0, 0, 0, 0, 3 ], [ 14, 1, 0.0325, 0.0065, 1, 0.3, ...
[ "UNDEFINED = -1", "if request.env.web2py_runtime_gae: # if running on Google App Engine\n store = DAL('gae') # connect to Google BigTable\n session.connect(request, response, db=store) # and store sessions and tickets there\nelse:\n store = DAL(\"sqlite://store.db\")"...
# import re # delimiter to use between words in URL URL_DELIMITER = '-' def pretty_url(id, name): """Create pretty URL from record name and ID """ return '%s%s%d' % (' '.join(re.sub('[^\w ]+', '', name).split()).replace(' ', URL_DELIMITER), URL_DELIMITER, id) def pretty_id(url): """Extract id from pretty URL """ return int(url.rpartition(URL_DELIMITER)[-1]) def pretty_text(s): "Make text pretty by capitalizing and using 'home' instead of 'default'" return s.replace('default', 'home').replace('_', ' ').capitalize() def title(): if response.title: return response.title elif request.function == 'index': return pretty_text(request.controller) else: return pretty_text(request.function)
[ [ 1, 0, 0.1071, 0.0357, 0, 0.66, 0, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 14, 0, 0.25, 0.0357, 0, 0.66, 0.2, 352, 1, 0, 0, 0, 0, 3, 0 ], [ 2, 0, 0.3393, 0.1429, 0, 0.66, ...
[ "import re", "URL_DELIMITER = '-'", "def pretty_url(id, name):\n \"\"\"Create pretty URL from record name and ID\n \"\"\"\n return '%s%s%d' % (' '.join(re.sub('[^\\w ]+', '', name).split()).replace(' ', URL_DELIMITER), URL_DELIMITER, id)", " \"\"\"Create pretty URL from record name and ID\n \"\...
########################################################### ### make sure administrator is on localhost ############################################################ import os, socket, datetime,copy import gluon.contenttype import gluon.fileutils ### crytical --- make a copy of the environment global_env=copy.copy(globals()) global_env['datetime']=datetime http_host = request.env.http_host.split(':')[0] remote_addr = request.env.remote_addr try: hosts=(http_host, socket.gethostbyname(remote_addr)) except: hosts=(http_host,) if remote_addr not in hosts: pass #raise HTTP(400) if not gluon.fileutils.check_credentials(request): redirect('/admin') response.view='appadmin.html' response.menu=[[T('design'),False,URL('admin','default','design', args=[request.application])], [T('db'),False,URL(r=request,f='index')], [T('state'),False,URL(r=request,f='state')]] ########################################################### ### auxiliary functions ############################################################ def get_databases(request): dbs={} for key,value in global_env.items(): cond=False try: cond=isinstance(value,GQLDB) except: cond=isinstance(value,SQLDB) if cond: dbs[key]=value return dbs databases=get_databases(None) def eval_in_global_env(text): exec('_ret=%s'%text,{},global_env) return global_env['_ret'] def get_database(request): if request.args and request.args[0] in databases: return eval_in_global_env(request.args[0]) else: session.flash=T('invalid request') redirect(URL(r=request,f='index')) def get_table(request): db=get_database(request) if len(request.args)>1 and request.args[1] in db.tables: return db,request.args[1] else: session.flash=T('invalid request') redirect(URL(r=request,f='index')) def get_query(request): try: return eval_in_global_env(request.vars.query) except Exception: return None ########################################################### ### list all databases and tables ############################################################ def index(): 
return dict(databases=databases) ########################################################### ### insert a new record ############################################################ def insert(): db,table=get_table(request) form=SQLFORM(db[table]) if form.accepts(request.vars,session): response.flash=T('new record inserted') return dict(form=form) ########################################################### ### list all records in table and insert new record ############################################################ def download(): import os db=get_database(request) filename=request.args[1] print filename ### for GAE only ### table,field=filename.split('.')[:2] if table in db.tables and field in db[table].fields: uploadfield=db[table][field].uploadfield if isinstance(uploadfield,str): from gluon.contenttype import contenttype response.headers['Content-Type']=contenttype(filename) rows=db(db[table][field]==filename).select() return rows[0][uploadfield] ### end for GAE ### path=os.path.join(request.folder,'uploads/',filename) return response.stream(open(path,'rb')) def csv(): import gluon.contenttype response.headers['Content-Type']=gluon.contenttype.contenttype('.csv') query=get_query(request) if not query: return None response.headers['Content-disposition']="attachment; filename=%s_%s.csv"%\ tuple(request.vars.query.split('.')[:2]) return str(db(query).select()) def import_csv(table,file): import csv reader = csv.reader(file) colnames=None for line in reader: if not colnames: colnames=[x[x.find('.')+1:] for x in line] c=[i for i in range(len(line)) if colnames[i]!='id'] else: items=[(colnames[i],line[i]) for i in c] table.insert(**dict(items)) def select(): import re db=get_database(request) dbname=request.args[0] regex=re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)') if request.vars.query: match=regex.match(request.vars.query) if match: request.vars.query='%s.%s.%s==%s' % (request.args[0],match.group('table'),match.group('field'),match.group('value')) 
else: request.vars.query=session.last_query query=get_query(request) if request.vars.start: start=int(request.vars.start) else: start=0 nrows=0 stop=start+100 table=None rows=[] orderby=request.vars.orderby if orderby: orderby=dbname+'.'+orderby if orderby==session.last_orderby: if orderby[0]=='~': orderby=orderby[1:] else: orderby='~'+orderby session.last_orderby=orderby session.last_query=request.vars.query form=FORM(TABLE(TR('Query:','',INPUT(_style='width:400px',_name='query',_value=request.vars.query or '',requires=IS_NOT_EMPTY())), TR('Update:',INPUT(_name='update_check',_type='checkbox',value=False), INPUT(_style='width:400px',_name='update_fields',_value=request.vars.update_fields or '')), TR('Delete:',INPUT(_name='delete_check',_class='delete',_type='checkbox',value=False),''), TR('','',INPUT(_type='submit',_value='submit')))) if request.vars.csvfile!=None: try: import_csv(db[request.vars.table],request.vars.csvfile.file) response.flash=T('data uploaded') except: response.flash=T('unable to parse csv file') if form.accepts(request.vars,formname=None): regex=re.compile(request.args[0]+'\.(?P<table>\w+)\.id\>0') match=regex.match(form.vars.query.strip()) if match: table=match.group('table') try: nrows=db(query).count() if form.vars.update_check and form.vars.update_fields: db(query).update(**eval_in_global_env('dict(%s)'%form.vars.update_fields)) response.flash=T('%s rows updated',nrows) elif form.vars.delete_check: db(query).delete() response.flash=T('%s rows deleted',nrows) nrows=db(query).count() if orderby: rows=db(query).select(limitby=(start,stop), orderby=eval_in_global_env(orderby)) else: rows=db(query).select(limitby=(start,stop)) except: rows,nrows=[],0 response.flash=T('Invalid Query') return dict(form=form,table=table,start=start,stop=stop,nrows=nrows,rows=rows,query=request.vars.query) ########################################################### ### edit delete one record ############################################################ def update(): 
db,table=get_table(request) try: id=int(request.args[2]) record=db(db[table].id==id).select()[0] except: session.flash=T('record does not exist') redirect(URL(r=request,f='select',args=request.args[:1],vars=dict(query='%s.%s.id>0'%tuple(request.args[:2])))) form=SQLFORM(db[table],record,deletable=True, linkto=URL(r=request,f='select',args=request.args[:1]), upload=URL(r=request,f='download',args=request.args[:1])) if form.accepts(request.vars,session): response.flash=T('done!') redirect(URL(r=request,f='select',args=request.args[:1],vars=dict(query='%s.%s.id>0'%tuple(request.args[:2])))) return dict(form=form) ########################################################### ### get global variables ############################################################ def state(): return dict()
[ [ 1, 0, 0.0288, 0.0048, 0, 0.66, 0, 688, 0, 4, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0337, 0.0048, 0, 0.66, 0.04, 919, 0, 1, 0, 0, 919, 0, 0 ], [ 1, 0, 0.0385, 0.0048, 0, 0....
[ "import os, socket, datetime,copy", "import gluon.contenttype", "import gluon.fileutils", "global_env=copy.copy(globals())", "global_env['datetime']=datetime", "http_host = request.env.http_host.split(':')[0]", "remote_addr = request.env.remote_addr", "try: hosts=(http_host, socket.gethostbyname(remot...
if not session.cart: # instantiate new cart session.cart, session.balance = [], 0 session.google_merchant_id = mystore.google_merchant_id response.menu = [ ['Store Front', request.function == 'index', URL(r=request, f='index')], ['About Us', request.function == 'aboutus', URL(r=request, f='aboutus')], ['Contact Us', request.function == 'contactus', URL(r=request, f='contactus')], ['Shopping Cart $%.2f' % float(session.balance), request.function == 'checkout', URL(r=request, f='checkout')] ] def index(): categories = store().select(store.category.ALL, orderby=store.category.name) featured = store(store.product.featured == True).select() return dict(categories=categories,featured=featured) def category(): if not request.args: redirect(URL(r=request, f='index')) category_id = pretty_id(request.args[0]) if len(request.args) == 3: # pagination start, stop = int(request.args[1]), int(request.args[2]) else: start, stop = 0, 20 categories = store().select(store.category.ALL, orderby=store.category.name) category_name = None for category in categories: if category.id == category_id: response.title = category_name = category.name if not category_name: redirect(URL(r=request, f='index')) if start == 0: featured = store(store.product.featured == True)(store.product.category == category_id).select() else: featured = [] ids = [p.id for p in featured] favourites = store(store.product.category == category_id).select(limitby=(start, stop)) favourites = [f for f in favourites if f.id not in ids] return dict(category_name=category_name, categories=categories, featured=featured, favourites=favourites) def product(): if not request.args: redirect(URL(r=request, f='index')) product_id = pretty_id(request.args[0]) products = store(store.product.id == product_id).select() if not products: redirect(URL(r=request, f='index')) product = products[0] response.title = product.name product.update_record(viewed=product.viewed+1) options = store(store.option.product == product.id).select() 
product_form = FORM( TABLE( [TR(TD(INPUT(_name='option', _value=option.id, _type='checkbox', _onchange="update_price(this, %.2f)" % option.price), option.description), H3('$%.2f' % option.price)) for option in options], TR( 'Price:', H2('$%.2f' % float(product.price), _id='total_price') ), BR(), TH('Qty:', INPUT(_name='quantity', _class='integer', _value=1, _size=1)), INPUT(_type='submit', _value='Add to cart'), ) ) if product_form.accepts(request.vars, session): quantity = int(product_form.vars.quantity) option_ids = product_form.vars.option if not isinstance(option_ids, list): option_ids = [option_ids] if option_ids else [] option_ids = [int(o) for o in option_ids] product.update_record(clicked=product.clicked+1) session.cart.append((product_id, quantity, option_ids)) redirect(URL(r=request, f='checkout')) # post a comment about a product comment_form = SQLFORM(store.comment, fields=['author', 'email', 'body', 'rate']) comment_form.vars.product = product.id if comment_form.accepts(request.vars, session): nc = store(store.comment.product == product.id).count() t = products[0].rating*nc + int(comment_form.vars.rate) products[0].update_record(rating=t/(nc+1)) response.flash = 'comment posted' if comment_form.errors: response.flash = 'invalid comment' comments = store(store.comment.product == product.id).select() better_ids = [row.better for row in store(store.up_sell.product == product.id).select(store.up_sell.better)] related_ids = [row.p2 for row in store(store.cross_sell.p1 == product.id).select()] + [row.p1 for row in store(store.cross_sell.p2 == product.id).select()] suggested = [store.product[id] for id in better_ids + related_ids] # XXXstore(store.product.id.belongs(better_ids + related_ids)).select() return dict(product=product, comments=comments, options=options, suggested=suggested, product_form=product_form, comment_form=comment_form) """ {{ if product.old_price: }} <b>was ${{= '%.2f' % float(product.old_price) }}</b> {{ pass }} </form> """ def 
remove_from_cart(): # remove product from cart del session.cart[int(request.args[0])] redirect(URL(r=request, f='checkout')) def empty_cart(): # empty cart of all products session.cart.clear() session.balance = 0 redirect(URL(r=request, f='checkout')) def checkout(): order = [] balance = 0 for product_id, qty, option_ids in session.cart: products = store(store.product.id == product_id).select() if products: product = products[0] options = [store.option[id] for id in option_ids]# XXX store(store.option.id.belongs(option_ids)).select() if option_ids else [] total_price = qty * (product.price + sum([option.price for option in options])) order.append((product_id, qty, total_price, product, options)) balance += total_price else: # invalid product pass session.balance = balance # XXX is updating in time? return dict(order=order, merchant_id=session.google_merchant_id) def popup(): return dict() def show(): response.session_id = None import gluon.contenttype, os filename = '/'.join(request.args) response.headers['Content-Type'] = gluon.contenttype.contenttype(filename) # XXX is this path going to be a problem on Windows? return open(os.path.join(request.folder, 'uploads', filename), 'rb').read() def aboutus(): return dict() def contactus(): return dict()
[ [ 4, 0, 0.0203, 0.0203, 0, 0.66, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 14, 1, 0.027, 0.0068, 1, 0.72, 0, 0, 0, 0, 0, 0, 0, 8, 0 ], [ 14, 0, 0.0338, 0.0068, 0, 0.66, 0....
[ "if not session.cart:\n # instantiate new cart\n session.cart, session.balance = [], 0", " session.cart, session.balance = [], 0", "session.google_merchant_id = mystore.google_merchant_id", "response.menu = [\n ['Store Front', request.function == 'index', URL(r=request, f='index')],\n ['About Us', ...
########################################################### ### make sure administrator is on localhost ############################################################ import os from gluon.contenttype import contenttype from gluon.fileutils import check_credentials, listdir if not session.authorized and not request.function=='login': redirect(URL(r=request,f='login')) response.view='manage.html' response.menu=[['manage',True,'/%s/manage/index' % (request.application)], ['logout',False,'/%s/manage/logout' % (request.application)], ['back to store',False,'/%s/default/index' % (request.application)]] ########################################################### ### list all tables in database ############################################################ def login(): response.view='manage/login.html' from gluon.fileutils import check_credentials if check_credentials(request,'admin'): session.authorized=True redirect(URL(r=request,f='index')) return dict() def logout(): session.authorized=False redirect(URL(r=request,c='default',f='index')) def index(): import types as _types _dbs={} for _key,_value in globals().items(): try: if _value.__class__==SQLDB: tables=_dbs[_key]=[] for _tablename in _value.tables(): tables.append((_key,_tablename)) except: pass return dict(dbs=_dbs) ########################################################### ### insert a new record ############################################################ def insert(): try: dbname=request.args[0] db=eval(dbname) table=request.args[1] form=SQLFORM(db[table]) except: redirect(URL(r=request,f='index')) if form.accepts(request.vars,session): response.flash='new record inserted' redirect(URL(r=request,f='select',args=request.args)) elif len(request.vars): response.flash='There are error in your submission form' return dict(form=form) ########################################################### ### list all records in table and insert new record ############################################################ def download(): 
filename=request.args[0] response.headers['Content-Type']=contenttype(filename) return open(os.path.join(request.folder,'uploads/','%s' % filename),'rb').read() def csv(): import gluon.contenttype, csv, cStringIO response.headers['Content-Type']=gluon.contenttype.contenttype('.csv') try: dbname=request.vars.dbname db=eval(dbname) records=db(request.vars.query).select() except: redirect(URL(r=request,f='index')) s=cStringIO.StringIO() writer = csv.writer(s) writer.writerow(records.colnames) c=range(len(records.colnames)) for i in range(len(records)): writer.writerow([records.response[i][j] for j in c]) ### FILL HERE return s.getvalue() def import_csv(table,file): import csv reader = csv.reader(file) colnames=None for line in reader: if not colnames: colnames=[x[x.find('.')+1:] for x in line] c=[i for i in range(len(line)) if colnames[i]!='id'] else: items=[(colnames[i],line[i]) for i in c] table.insert(**dict(items)) def select(): try: dbname=request.args[0] db=eval(dbname) if not request.vars.query: table=request.args[1] query='%s.id>0' % table else: query=request.vars.query except: redirect(URL(r=request,f='index')) if request.vars.csvfile!=None: try: import_csv(db[table],request.vars.csvfile.file) response.flash='data uploaded' except: reponse.flash='unable to parse csv file' if request.vars.delete_all and request.vars.delete_all_sure=='yes': try: db(query).delete() response.flash='records deleted' except: response.flash='invalid SQL FILTER' elif request.vars.update_string: try: env=dict(db=db,query=query) exec('db(query).update('+request.vars.update_string+')') in env response.flash='records updated' except: response.flash='invalid SQL FILTER or UPDATE STRING' if request.vars.start: start=int(request.vars.start) else: start=0 limitby=(start,start+100) try: records=db(query).select(limitby=limitby) except: response.flash='invalid SQL FILTER' return dict(records='no records',nrecords=0,query=query,start=0) linkto=URL(r=request,f='update/%s'% (dbname)) 
upload=URL(r=request,f='download') return dict(start=start,query=query,\ nrecords=len(records),\ records=SQLTABLE(records,linkto,upload,_class='sortable')) ########################################################### ### edit delete one record ############################################################ def update(): try: dbname=request.args[0] db=eval(dbname) table=request.args[1] except: redirect(URL(r=request,f='index')) try: id=int(request.args[2]) record=db(db[table].id==id).select()[0] except: redirect(URL(r=request,f='select/%s/%s'%(dbname,table))) form=SQLFORM(db[table],record,deletable=True, linkto=URL(r=request,f='select/'+dbname), upload=URL(r=request,f='download/')) if form.accepts(request.vars,session): response.flash='done!' redirect(URL(r=request,f='select/%s/%s'%(dbname,table))) return dict(form=form) def cleanup(): app=request.application files=listdir('applications/%s/cache/' % app,'',0) for file in files: os.unlink(file) files=listdir('applications/%s/errors/' % app,'',0) for file in files: os.unlink(file) files=listdir('applications/%s/sessions/' % app,'',0) for file in files: os.unlink(file) session.flash="cache, errors and sessions cleaned" redirect(URL(r=request,f='index')) def setup(): response.view='manage/setup.html' form=SQLFORM(store.info,mystore) if form.accepts(request.vars,session): response.flash='that was easy! now go vist your store.' else: response.flash='welcome to the store-in-a-stick setup' return dict(form=form)
[ [ 1, 0, 0.0278, 0.0056, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0333, 0.0056, 0, 0.66, 0.0625, 919, 0, 1, 0, 0, 919, 0, 0 ], [ 1, 0, 0.0389, 0.0056, 0, ...
[ "import os", "from gluon.contenttype import contenttype", "from gluon.fileutils import check_credentials, listdir", "if not session.authorized and not request.function=='login':\n redirect(URL(r=request,f='login'))", " redirect(URL(r=request,f='login'))", "response.view='manage.html'", "response.m...
#!/usr/bin/python2.4 # # Copyright 2007 The Python-Twitter Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''The setup and build script for the python-twitter library.''' __author__ = 'python-twitter@googlegroups.com' __version__ = '0.8.5' # The base package metadata to be used by both distutils and setuptools METADATA = dict( name = "python-twitter", version = __version__, py_modules = ['twitter'], author='The Python-Twitter Developers', author_email='python-twitter@googlegroups.com', description='A python wrapper around the Twitter API', license='Apache License 2.0', url='https://github.com/bear/python-twitter', keywords='twitter api', ) # Extra package metadata to be used only if setuptools is installed SETUPTOOLS_METADATA = dict( install_requires = ['setuptools', 'simplejson', 'oauth2'], include_package_data = True, classifiers = [ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Communications :: Chat', 'Topic :: Internet', ], test_suite = 'twitter_test.suite', ) def Read(file): return open(file).read() def BuildLongDescription(): return '\n'.join([Read('README.md'), Read('CHANGES')]) def Main(): # Build the long_description from the README and CHANGES METADATA['long_description'] = BuildLongDescription() # Use setuptools if available, otherwise fallback and use distutils try: import setuptools 
METADATA.update(SETUPTOOLS_METADATA) setuptools.setup(**METADATA) except ImportError: import distutils.core distutils.core.setup(**METADATA) if __name__ == '__main__': Main()
[ [ 8, 0, 0.2329, 0.0137, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2603, 0.0137, 0, 0.66, 0.125, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.274, 0.0137, 0, 0.66, ...
[ "'''The setup and build script for the python-twitter library.'''", "__author__ = 'python-twitter@googlegroups.com'", "__version__ = '0.8.5'", "METADATA = dict(\n name = \"python-twitter\",\n version = __version__,\n py_modules = ['twitter'],\n author='The Python-Twitter Developers',\n author_email='pyth...
#!/usr/bin/python2.4 # # Copyright 2007 The Python-Twitter Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys # parse_qsl moved to urlparse module in v2.6 try: from urlparse import parse_qsl except: from cgi import parse_qsl import oauth2 as oauth REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token' ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token' AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize' SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate' consumer_key = None consumer_secret = None if consumer_key is None or consumer_secret is None: print 'You need to edit this script and provide values for the' print 'consumer_key and also consumer_secret.' print '' print 'The values you need come from Twitter - you need to register' print 'as a developer your "application". This is needed only until' print 'Twitter finishes the idea they have of a way to allow open-source' print 'based libraries to have a token that can be used to generate a' print 'one-time use key that will allow the library to make the request' print 'on your behalf.' 
print '' sys.exit(1) signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret) oauth_client = oauth.Client(oauth_consumer) print 'Requesting temp token from Twitter' resp, content = oauth_client.request(REQUEST_TOKEN_URL, 'GET') if resp['status'] != '200': print 'Invalid respond from Twitter requesting temp token: %s' % resp['status'] else: request_token = dict(parse_qsl(content)) print '' print 'Please visit this Twitter page and retrieve the pincode to be used' print 'in the next step to obtaining an Authentication Token:' print '' print '%s?oauth_token=%s' % (AUTHORIZATION_URL, request_token['oauth_token']) print '' pincode = raw_input('Pincode? ') token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret']) token.set_verifier(pincode) print '' print 'Generating and signing request for an access token' print '' oauth_client = oauth.Client(oauth_consumer, token) resp, content = oauth_client.request(ACCESS_TOKEN_URL, method='POST', body='oauth_callback=oob&oauth_verifier=%s' % pincode) access_token = dict(parse_qsl(content)) if resp['status'] != '200': print 'The request for a Token did not succeed: %s' % resp['status'] print access_token else: print 'Your Twitter Access Token key: %s' % access_token['oauth_token'] print ' Access Token secret: %s' % access_token['oauth_token_secret'] print ''
[ [ 1, 0, 0.1978, 0.011, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.2088, 0.011, 0, 0.66, 0.0625, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 7, 0, 0.2582, 0.044, 0, 0.6...
[ "import os", "import sys", "try:\n from urlparse import parse_qsl\nexcept:\n from cgi import parse_qsl", " from urlparse import parse_qsl", " from cgi import parse_qsl", "import oauth2 as oauth", "REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'", "ACCESS_TOKEN_URL = 'https://api...
"""Implementation of JSONEncoder """ import re try: from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii except ImportError: c_encode_basestring_ascii = None try: from simplejson._speedups import make_encoder as c_make_encoder except ImportError: c_make_encoder = None ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') ESCAPE_DCT = { '\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', } for i in range(0x20): ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) # Assume this produces an infinity on all machines (probably not guaranteed) INFINITY = float('1e66666') FLOAT_REPR = repr def encode_basestring(s): """Return a JSON representation of a Python string """ def replace(match): return ESCAPE_DCT[match.group(0)] return '"' + ESCAPE.sub(replace, s) + '"' def py_encode_basestring_ascii(s): """Return an ASCII-only JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): s = match.group(0) try: return ESCAPE_DCT[s] except KeyError: n = ord(s) if n < 0x10000: return '\\u%04x' % (n,) else: # surrogate pair n -= 0x10000 s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) return '\\u%04x\\u%04x' % (s1, s2) return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii class JSONEncoder(object): """Extensible JSON <http://json.org> encoder for Python data structures. 
Supports the following objects and types by default: +-------------------+---------------+ | Python | JSON | +===================+===============+ | dict | object | +-------------------+---------------+ | list, tuple | array | +-------------------+---------------+ | str, unicode | string | +-------------------+---------------+ | int, long, float | number | +-------------------+---------------+ | True | true | +-------------------+---------------+ | False | false | +-------------------+---------------+ | None | null | +-------------------+---------------+ To extend this to recognize other objects, subclass and implement a ``.default()`` method with another method that returns a serializable object for ``o`` if possible, otherwise it should call the superclass implementation (to raise ``TypeError``). """ item_separator = ', ' key_separator = ': ' def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, sort_keys=False, indent=None, separators=None, encoding='utf-8', default=None): """Constructor for JSONEncoder, with sensible defaults. If skipkeys is False, then it is a TypeError to attempt encoding of keys that are not str, int, long, float or None. If skipkeys is True, such items are simply skipped. If ensure_ascii is True, the output is guaranteed to be str objects with all incoming unicode characters escaped. If ensure_ascii is false, the output will be unicode object. If check_circular is True, then lists, dicts, and custom encoded objects will be checked for circular references during encoding to prevent an infinite recursion (which would cause an OverflowError). Otherwise, no such check takes place. If allow_nan is True, then NaN, Infinity, and -Infinity will be encoded as such. This behavior is not JSON specification compliant, but is consistent with most JavaScript based encoders and decoders. Otherwise, it will be a ValueError to encode such floats. 
If sort_keys is True, then the output of dictionaries will be sorted by key; this is useful for regression tests to ensure that JSON serializations can be compared on a day-to-day basis. If indent is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. None is the most compact representation. If specified, separators should be a (item_separator, key_separator) tuple. The default is (', ', ': '). To get the most compact JSON representation you should specify (',', ':') to eliminate whitespace. If specified, default is a function that gets called for objects that can't otherwise be serialized. It should return a JSON encodable version of the object or raise a ``TypeError``. If encoding is not None, then all input strings will be transformed into unicode using that encoding prior to JSON-encoding. The default is UTF-8. """ self.skipkeys = skipkeys self.ensure_ascii = ensure_ascii self.check_circular = check_circular self.allow_nan = allow_nan self.sort_keys = sort_keys self.indent = indent if separators is not None: self.item_separator, self.key_separator = separators if default is not None: self.default = default self.encoding = encoding def default(self, o): """Implement this method in a subclass such that it returns a serializable object for ``o``, or calls the base implementation (to raise a ``TypeError``). For example, to support arbitrary iterators, you could implement default like this:: def default(self, o): try: iterable = iter(o) except TypeError: pass else: return list(iterable) return JSONEncoder.default(self, o) """ raise TypeError("%r is not JSON serializable" % (o,)) def encode(self, o): """Return a JSON string representation of a Python data structure. >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) '{"foo": ["bar", "baz"]}' """ # This is for extremely simple cases and benchmarks. 
if isinstance(o, basestring): if isinstance(o, str): _encoding = self.encoding if (_encoding is not None and not (_encoding == 'utf-8')): o = o.decode(_encoding) if self.ensure_ascii: return encode_basestring_ascii(o) else: return encode_basestring(o) # This doesn't pass the iterator directly to ''.join() because the # exceptions aren't as detailed. The list call should be roughly # equivalent to the PySequence_Fast that ''.join() would do. chunks = self.iterencode(o, _one_shot=True) if not isinstance(chunks, (list, tuple)): chunks = list(chunks) return ''.join(chunks) def iterencode(self, o, _one_shot=False): """Encode the given object and yield each string representation as available. For example:: for chunk in JSONEncoder().iterencode(bigobject): mysocket.write(chunk) """ if self.check_circular: markers = {} else: markers = None if self.ensure_ascii: _encoder = encode_basestring_ascii else: _encoder = encode_basestring if self.encoding != 'utf-8': def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): if isinstance(o, str): o = o.decode(_encoding) return _orig_encoder(o) def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY): # Check for specials. Note that this type of test is processor- and/or # platform-specific, so do tests which don't depend on the internals. 
if o != o: text = 'NaN' elif o == _inf: text = 'Infinity' elif o == _neginf: text = '-Infinity' else: return _repr(o) if not allow_nan: raise ValueError("Out of range float values are not JSON compliant: %r" % (o,)) return text if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys: _iterencode = c_make_encoder( markers, self.default, _encoder, self.indent, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, self.allow_nan) else: _iterencode = _make_iterencode( markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot) return _iterencode(o, 0) def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, ## HACK: hand-optimized bytecode; turn globals into locals False=False, True=True, ValueError=ValueError, basestring=basestring, dict=dict, float=float, id=id, int=int, isinstance=isinstance, list=list, long=long, str=str, tuple=tuple, ): def _iterencode_list(lst, _current_indent_level): if not lst: yield '[]' return if markers is not None: markerid = id(lst) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = lst buf = '[' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (' ' * (_indent * _current_indent_level)) separator = _item_separator + newline_indent buf += newline_indent else: newline_indent = None separator = _item_separator first = True for value in lst: if first: first = False else: buf = separator if isinstance(value, basestring): yield buf + _encoder(value) elif value is None: yield buf + 'null' elif value is True: yield buf + 'true' elif value is False: yield buf + 'false' elif isinstance(value, (int, long)): yield buf + str(value) elif isinstance(value, float): yield buf + _floatstr(value) else: yield buf if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, 
_current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (' ' * (_indent * _current_indent_level)) yield ']' if markers is not None: del markers[markerid] def _iterencode_dict(dct, _current_indent_level): if not dct: yield '{}' return if markers is not None: markerid = id(dct) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = dct yield '{' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (' ' * (_indent * _current_indent_level)) item_separator = _item_separator + newline_indent yield newline_indent else: newline_indent = None item_separator = _item_separator first = True if _sort_keys: items = dct.items() items.sort(key=lambda kv: kv[0]) else: items = dct.iteritems() for key, value in items: if isinstance(key, basestring): pass # JavaScript is weakly typed for these, so it makes sense to # also allow them. Many encoders seem to do something like this. 
elif isinstance(key, float): key = _floatstr(key) elif isinstance(key, (int, long)): key = str(key) elif key is True: key = 'true' elif key is False: key = 'false' elif key is None: key = 'null' elif _skipkeys: continue else: raise TypeError("key %r is not a string" % (key,)) if first: first = False else: yield item_separator yield _encoder(key) yield _key_separator if isinstance(value, basestring): yield _encoder(value) elif value is None: yield 'null' elif value is True: yield 'true' elif value is False: yield 'false' elif isinstance(value, (int, long)): yield str(value) elif isinstance(value, float): yield _floatstr(value) else: if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, _current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (' ' * (_indent * _current_indent_level)) yield '}' if markers is not None: del markers[markerid] def _iterencode(o, _current_indent_level): if isinstance(o, basestring): yield _encoder(o) elif o is None: yield 'null' elif o is True: yield 'true' elif o is False: yield 'false' elif isinstance(o, (int, long)): yield str(o) elif isinstance(o, float): yield _floatstr(o) elif isinstance(o, (list, tuple)): for chunk in _iterencode_list(o, _current_indent_level): yield chunk elif isinstance(o, dict): for chunk in _iterencode_dict(o, _current_indent_level): yield chunk else: if markers is not None: markerid = id(o) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = o o = _default(o) for chunk in _iterencode(o, _current_indent_level): yield chunk if markers is not None: del markers[markerid] return _iterencode
[ [ 8, 0, 0.0035, 0.0046, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0069, 0.0023, 0, 0.66, 0.0667, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 7, 0, 0.015, 0.0092, 0, 0.66,...
[ "\"\"\"Implementation of JSONEncoder\n\"\"\"", "import re", "try:\n from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii\nexcept ImportError:\n c_encode_basestring_ascii = None", " from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii",...
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data interchange format. :mod:`simplejson` exposes an API familiar to users of the standard library :mod:`marshal` and :mod:`pickle` modules. It is the externally maintained version of the :mod:`json` library contained in Python 2.6, but maintains compatibility with Python 2.4 and Python 2.5 and (currently) has significant performance advantages, even without using the optional C extension for speedups. Encoding basic Python object hierarchies:: >>> import simplejson as json >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) '["foo", {"bar": ["baz", null, 1.0, 2]}]' >>> print json.dumps("\"foo\bar") "\"foo\bar" >>> print json.dumps(u'\u1234') "\u1234" >>> print json.dumps('\\') "\\" >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) {"a": 0, "b": 0, "c": 0} >>> from StringIO import StringIO >>> io = StringIO() >>> json.dump(['streaming API'], io) >>> io.getvalue() '["streaming API"]' Compact encoding:: >>> import simplejson as json >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) '[1,2,3,{"4":5,"6":7}]' Pretty printing:: >>> import simplejson as json >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4) >>> print '\n'.join([l.rstrip() for l in s.splitlines()]) { "4": 5, "6": 7 } Decoding JSON:: >>> import simplejson as json >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj True >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar' True >>> from StringIO import StringIO >>> io = StringIO('["streaming API"]') >>> json.load(io)[0] == 'streaming API' True Specializing JSON object decoding:: >>> import simplejson as json >>> def as_complex(dct): ... if '__complex__' in dct: ... return complex(dct['real'], dct['imag']) ... return dct ... >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', ... 
object_hook=as_complex) (1+2j) >>> import decimal >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1') True Specializing JSON object encoding:: >>> import simplejson as json >>> def encode_complex(obj): ... if isinstance(obj, complex): ... return [obj.real, obj.imag] ... raise TypeError("%r is not JSON serializable" % (o,)) ... >>> json.dumps(2 + 1j, default=encode_complex) '[2.0, 1.0]' >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) '[2.0, 1.0]' >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) '[2.0, 1.0]' Using simplejson.tool from the shell to validate and pretty-print:: $ echo '{"json":"obj"}' | python -msimplejson.tool { "json": "obj" } $ echo '{ 1.2:3.4}' | python -msimplejson.tool Expecting property name: line 1 column 2 (char 2) """ __version__ = '2.0.7' __all__ = [ 'dump', 'dumps', 'load', 'loads', 'JSONDecoder', 'JSONEncoder', ] from decoder import JSONDecoder from encoder import JSONEncoder _default_encoder = JSONEncoder( skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, indent=None, separators=None, encoding='utf-8', default=None, ) def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp`` may be ``unicode`` instances, subject to normal Python ``str`` to ``unicode`` coercion rules. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is likely to cause an error. 
If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. 
""" # cached encoder if (skipkeys is False and ensure_ascii is True and check_circular is True and allow_nan is True and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): iterable = _default_encoder.iterencode(obj) else: if cls is None: cls = JSONEncoder iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).iterencode(obj) # could accelerate with writelines in some versions of Python, at # a debuggability cost for chunk in iterable: fp.write(chunk) def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """Serialize ``obj`` to a JSON formatted ``str``. If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then the return value will be a ``unicode`` instance subject to normal Python ``str`` to ``unicode`` coercion rules instead of being escaped to an ASCII ``str``. If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. 
If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. """ # cached encoder if (skipkeys is False and ensure_ascii is True and check_circular is True and allow_nan is True and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): return _default_encoder.encode(obj) if cls is None: cls = JSONEncoder return cls( skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).encode(obj) _default_decoder = JSONDecoder(encoding=None, object_hook=None) def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing a JSON document) to a Python object. If the contents of ``fp`` is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed, and should be wrapped with ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode`` object and passed to ``loads()`` ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). 
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. """ return loads(fp.read(), encoding=encoding, cls=cls, object_hook=object_hook, parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant, **kw) def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, **kw): """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON document) to a Python object. If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed and should be decoded to ``unicode`` first. ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). ``parse_float``, if specified, will be called with the string of every JSON float to be decoded. By default this is equivalent to float(num_str). This can be used to use another datatype or parser for JSON floats (e.g. decimal.Decimal). ``parse_int``, if specified, will be called with the string of every JSON int to be decoded. By default this is equivalent to int(num_str). This can be used to use another datatype or parser for JSON integers (e.g. float). ``parse_constant``, if specified, will be called with one of the following strings: -Infinity, Infinity, NaN, null, true, false. This can be used to raise an exception if invalid JSON numbers are encountered. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. 
""" if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and not kw): return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: kw['object_hook'] = object_hook if parse_float is not None: kw['parse_float'] = parse_float if parse_int is not None: kw['parse_int'] = parse_int if parse_constant is not None: kw['parse_constant'] = parse_constant return cls(encoding=encoding, **kw).decode(s)
[ [ 8, 0, 0.1582, 0.3133, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.3165, 0.0032, 0, 0.66, 0.1, 162, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.3244, 0.0127, 0, 0.66, ...
[ "r\"\"\"JSON (JavaScript Object Notation) <http://json.org> is a subset of\nJavaScript syntax (ECMA-262 3rd edition) used as a lightweight data\ninterchange format.\n\n:mod:`simplejson` exposes an API familiar to users of the standard library\n:mod:`marshal` and :mod:`pickle` modules. It is the externally maintaine...
"""JSON token scanner """ import re try: from simplejson._speedups import make_scanner as c_make_scanner except ImportError: c_make_scanner = None __all__ = ['make_scanner'] NUMBER_RE = re.compile( r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', (re.VERBOSE | re.MULTILINE | re.DOTALL)) def py_make_scanner(context): parse_object = context.parse_object parse_array = context.parse_array parse_string = context.parse_string match_number = NUMBER_RE.match encoding = context.encoding strict = context.strict parse_float = context.parse_float parse_int = context.parse_int parse_constant = context.parse_constant object_hook = context.object_hook def _scan_once(string, idx): try: nextchar = string[idx] except IndexError: raise StopIteration if nextchar == '"': return parse_string(string, idx + 1, encoding, strict) elif nextchar == '{': return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook) elif nextchar == '[': return parse_array((string, idx + 1), _scan_once) elif nextchar == 'n' and string[idx:idx + 4] == 'null': return None, idx + 4 elif nextchar == 't' and string[idx:idx + 4] == 'true': return True, idx + 4 elif nextchar == 'f' and string[idx:idx + 5] == 'false': return False, idx + 5 m = match_number(string, idx) if m is not None: integer, frac, exp = m.groups() if frac or exp: res = parse_float(integer + (frac or '') + (exp or '')) else: res = parse_int(integer) return res, m.end() elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': return parse_constant('NaN'), idx + 3 elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': return parse_constant('Infinity'), idx + 8 elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': return parse_constant('-Infinity'), idx + 9 else: raise StopIteration return _scan_once make_scanner = c_make_scanner or py_make_scanner
[ [ 8, 0, 0.0231, 0.0308, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0462, 0.0154, 0, 0.66, 0.1667, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 7, 0, 0.0846, 0.0615, 0, 0.66...
[ "\"\"\"JSON token scanner\n\"\"\"", "import re", "try:\n from simplejson._speedups import make_scanner as c_make_scanner\nexcept ImportError:\n c_make_scanner = None", " from simplejson._speedups import make_scanner as c_make_scanner", " c_make_scanner = None", "__all__ = ['make_scanner']", ...
#!/usr/bin/python2.4 '''Post a message to twitter''' __author__ = 'dewitt@google.com' import ConfigParser import getopt import os import sys import twitter USAGE = '''Usage: tweet [options] message This script posts a message to Twitter. Options: -h --help : print this help --consumer-key : the twitter consumer key --consumer-secret : the twitter consumer secret --access-key : the twitter access token key --access-secret : the twitter access token secret --encoding : the character set encoding used in input strings, e.g. "utf-8". [optional] Documentation: If either of the command line flags are not present, the environment variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your consumer_key or consumer_secret, respectively. If neither the command line flags nor the enviroment variables are present, the .tweetrc file, if it exists, can be used to set the default consumer_key and consumer_secret. The file should contain the following three lines, replacing *consumer_key* with your consumer key, and *consumer_secret* with your consumer secret: A skeletal .tweetrc file: [Tweet] consumer_key: *consumer_key* consumer_secret: *consumer_password* access_key: *access_key* access_secret: *access_password* ''' def PrintUsageAndExit(): print USAGE sys.exit(2) def GetConsumerKeyEnv(): return os.environ.get("TWEETUSERNAME", None) def GetConsumerSecretEnv(): return os.environ.get("TWEETPASSWORD", None) def GetAccessKeyEnv(): return os.environ.get("TWEETACCESSKEY", None) def GetAccessSecretEnv(): return os.environ.get("TWEETACCESSSECRET", None) class TweetRc(object): def __init__(self): self._config = None def GetConsumerKey(self): return self._GetOption('consumer_key') def GetConsumerSecret(self): return self._GetOption('consumer_secret') def GetAccessKey(self): return self._GetOption('access_key') def GetAccessSecret(self): return self._GetOption('access_secret') def _GetOption(self, option): try: return self._GetConfig().get('Tweet', option) except: return None 
def _GetConfig(self): if not self._config: self._config = ConfigParser.ConfigParser() self._config.read(os.path.expanduser('~/.tweetrc')) return self._config def main(): try: shortflags = 'h' longflags = ['help', 'consumer-key=', 'consumer-secret=', 'access-key=', 'access-secret=', 'encoding='] opts, args = getopt.gnu_getopt(sys.argv[1:], shortflags, longflags) except getopt.GetoptError: PrintUsageAndExit() consumer_keyflag = None consumer_secretflag = None access_keyflag = None access_secretflag = None encoding = None for o, a in opts: if o in ("-h", "--help"): PrintUsageAndExit() if o in ("--consumer-key"): consumer_keyflag = a if o in ("--consumer-secret"): consumer_secretflag = a if o in ("--access-key"): access_keyflag = a if o in ("--access-secret"): access_secretflag = a if o in ("--encoding"): encoding = a message = ' '.join(args) if not message: PrintUsageAndExit() rc = TweetRc() consumer_key = consumer_keyflag or GetConsumerKeyEnv() or rc.GetConsumerKey() consumer_secret = consumer_secretflag or GetConsumerSecretEnv() or rc.GetConsumerSecret() access_key = access_keyflag or GetAccessKeyEnv() or rc.GetAccessKey() access_secret = access_secretflag or GetAccessSecretEnv() or rc.GetAccessSecret() if not consumer_key or not consumer_secret or not access_key or not access_secret: PrintUsageAndExit() api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token_key=access_key, access_token_secret=access_secret, input_encoding=encoding) try: status = api.PostUpdate(message) except UnicodeDecodeError: print "Your message could not be encoded. Perhaps it contains non-ASCII characters? " print "Try explicitly specifying the encoding with the --encoding flag" sys.exit(2) print "%s just posted: %s" % (status.user.name, status.text) if __name__ == "__main__": main()
[ [ 8, 0, 0.0213, 0.0071, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.0355, 0.0071, 0, 0.66, 0.0667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.0496, 0.0071, 0, 0.66,...
[ "'''Post a message to twitter'''", "__author__ = 'dewitt@google.com'", "import ConfigParser", "import getopt", "import os", "import sys", "import twitter", "USAGE = '''Usage: tweet [options] message\n\n This script posts a message to Twitter.\n\n Options:\n\n -h --help : print this help\n --co...
#!/usr/bin/python2.4 # # Copyright 2007 The Python-Twitter Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''A class that defines the default URL Shortener. TinyURL is provided as the default and as an example. ''' import urllib # Change History # # 2010-05-16 # TinyURL example and the idea for this comes from a bug filed by # acolorado with patch provided by ghills. Class implementation # was done by bear. # # Issue 19 http://code.google.com/p/python-twitter/issues/detail?id=19 # class ShortenURL(object): '''Helper class to make URL Shortener calls if/when required''' def __init__(self, userid=None, password=None): '''Instantiate a new ShortenURL object Args: userid: userid for any required authorization call [optional] password: password for any required authorization call [optional] ''' self.userid = userid self.password = password def Shorten(self, longURL): '''Call TinyURL API and returned shortened URL result Args: longURL: URL string to shorten Returns: The shortened URL as a string Note: longURL is required and no checks are made to ensure completeness ''' result = None f = urllib.urlopen("http://tinyurl.com/api-create.php?url=%s" % longURL) try: result = f.read() finally: f.close() return result
[ [ 8, 0, 0.2606, 0.0563, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.3099, 0.0141, 0, 0.66, 0.5, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 3, 0, 0.7535, 0.507, 0, 0.66, ...
[ "'''A class that defines the default URL Shortener.\n\nTinyURL is provided as the default and as an example.\n'''", "import urllib", "class ShortenURL(object):\n '''Helper class to make URL Shortener calls if/when required'''\n def __init__(self,\n userid=None,\n password=N...
maxVf = 200 # Generating the header head = """// Copyright qiuc12@gmail.com // This file is generated autmatically by python. DONT MODIFY IT! #pragma once #include <OleAuto.h> class FakeDispatcher; HRESULT DualProcessCommand(int commandId, FakeDispatcher *disp, ...); extern "C" void DualProcessCommandWrap(); class FakeDispatcherBase : public IDispatch { private:""" pattern = """ \tvirtual HRESULT __stdcall fv{0}(char x) {{ \t\tva_list va = &x; \t\tHRESULT ret = ProcessCommand({0}, va); \t\tva_end(va); \t\treturn ret; \t}} """ pattern = """ \tvirtual HRESULT __stdcall fv{0}();""" end = """ protected: \tconst static int kMaxVf = {0}; }}; """ f = open("FakeDispatcherBase.h", "w") f.write(head) for i in range(0, maxVf): f.write(pattern.format(i)) f.write(end.format(maxVf)) f.close() head = """; Copyright qiuc12@gmail.com ; This file is generated automatically by python. DON'T MODIFY IT! """ f = open("FakeDispatcherBase.asm", "w") f.write(head) f.write(".386\n") f.write(".model flat\n") f.write("_DualProcessCommandWrap proto\n") ObjFormat = "?fv{0}@FakeDispatcherBase@@EAGJXZ" for i in range(0, maxVf): f.write("PUBLIC " + ObjFormat.format(i) + "\n") f.write(".code\n") for i in range(0, maxVf): f.write(ObjFormat.format(i) + " proc\n") f.write(" push {0}\n".format(i)) f.write(" jmp _DualProcessCommandWrap\n") f.write(ObjFormat.format(i) + " endp\n") f.write("\nend\n") f.close()
[ [ 14, 0, 0.0172, 0.0172, 0, 0.66, 0, 302, 1, 0, 0, 0, 0, 1, 0 ], [ 14, 0, 0.1466, 0.1724, 0, 0.66, 0.0476, 217, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.319, 0.1379, 0, 0....
[ "maxVf = 200", "head = \"\"\"// Copyright qiuc12@gmail.com\n// This file is generated autmatically by python. DONT MODIFY IT!\n\n#pragma once\n#include <OleAuto.h>\nclass FakeDispatcher;\nHRESULT DualProcessCommand(int commandId, FakeDispatcher *disp, ...);\nextern \"C\" void DualProcessCommandWrap();", "patter...
#! /usr/bin/python # -*- coding: utf-8 -*- #----------------------------------------------------------------------------- # Name: Error.py # Purpose: error exception class # Author: Fabien Marteau <fabien.marteau@armadeus.com> # Created: 30/04/2008 #----------------------------------------------------------------------------- # Copyright (2008) Armadeus Systems # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # #----------------------------------------------------------------------------- # Revision list : # # Date By Changes # #----------------------------------------------------------------------------- __doc__ = "" __versionTime__ = "30/04/2008" __author__ = "Fabien Marteau <fabien.marteau@armadeus.com>" INFO = 2 WARNING = 1 ERROR = 0 class Error(Exception): """ Manage specific error attributes: message -- the message level -- the exception level """ def __init__(self,message,level): self.message = message self.level = level Exception.__init__(self,message) def __repr__(self): return self.message def __str__(self): if self.level == 0: return "[ERROR] : " + self.message elif self.level == 1: return "[WARNING]: " + self.message else: return "[INFO]: " + self.message def setLevel(self,level): self.level = int(str(level))
[ [ 14, 0, 0.4923, 0.0154, 0, 0.66, 0, 155, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.5077, 0.0154, 0, 0.66, 0.1667, 587, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.5231, 0.0154, 0, 0...
[ "__doc__ = \"\"", "__versionTime__ = \"30/04/2008\"", "__author__ = \"Fabien Marteau <fabien.marteau@armadeus.com>\"", "INFO = 2", "WARNING = 1", "ERROR = 0", "class Error(Exception):\n \"\"\" Manage specific error\n\n attributes:\n message -- the message\n level -- the except...
#! /usr/bin/python # -*- coding: utf-8 -*- #----------------------------------------------------------------------------- # Name: general_test.py # Purpose: General testing program for unioc-ng # Author: Fabien Marteau <fabien.marteau@armadeus.com> # Created: 22/11/2008 #----------------------------------------------------------------------------- # Copyright (2008) Armadeus Systems # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # #----------------------------------------------------------------------------- # Revision list : # # Date By Changes # #----------------------------------------------------------------------------- __doc__ = "" __author__ = "Fabien Marteau <fabien.marteau@armadeus.com>" import sys import os from error import Error from bootstrap import BootStrap def consoleDump(tid): while 1: sys.stdout.write(tid.read(1)) def waitForPositiveResponse(charok,error_message): response = raw_input().strip() if response != charok: raise Error(error_message,0) if __name__ == "__main__": print "********************************************" print "* This is the testing program for unioc-ng.*" print "* Version 0.1 *" print "* To ensure a good card functionning *" print "* follow the instruction. *" print "********************************************" print " Powering ..." 
print "* Program atmega bootstrap with JTAG" raw_input() print "* Configure fpga with GPIO ip" raw_input() print "* Branch shunts" raw_input()
[ [ 14, 0, 0.4848, 0.0152, 0, 0.66, 0, 155, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.5, 0.0152, 0, 0.66, 0.125, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.5303, 0.0152, 0, 0.66, ...
[ "__doc__ = \"\"", "__author__ = \"Fabien Marteau <fabien.marteau@armadeus.com>\"", "import sys", "import os", "from error import Error", "from bootstrap import BootStrap", "def consoleDump(tid):\n while 1:\n sys.stdout.write(tid.read(1))", " while 1:\n sys.stdout.write(tid.read...
#! /usr/bin/python # -*- coding: utf-8 -*- #----------------------------------------------------------------------------- # Name: bootstrap.py # Purpose: bootstrap class used to communicate with atmega # Author: Fabien Marteau <fabien.marteau@armadeus.com> # Created: 22/11/2008 #----------------------------------------------------------------------------- # Copyright (2008) Armadeus Systems # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # #----------------------------------------------------------------------------- # Revision list : # # Date By Changes # #----------------------------------------------------------------------------- __doc__ = "" __author__ = "Fabien Marteau <fabien.marteau@armadeus.com>" import sys import time import os try: import serial except ImportError: print "[ERROR]Pyserial is needed : "+\ "http://pyserial.wiki.sourceforge.net/pySerial" #exit() class BootStrap(): def __init__(self): print "TODO: init BootStrap" def put(self,address,value): """ Write a value on atmega register address : int (16bits) value : int (8bits) """ print "TODO: put %02X at %04X"%(value,address) def get(self,address): """ Read a value from atmega register address : int (16bits) return int """ print "TODO : read value at address %04X"%address
[ [ 14, 0, 0.5333, 0.0167, 0, 0.66, 0, 155, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.55, 0.0167, 0, 0.66, 0.1667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.5833, 0.0167, 0, 0.66...
[ "__doc__ = \"\"", "__author__ = \"Fabien Marteau <fabien.marteau@armadeus.com>\"", "import sys", "import time", "import os", "try:\n import serial\nexcept ImportError:\n \"http://pyserial.wiki.sourceforge.net/pySerial\"", " import serial", " \"http://pyserial.wiki.sourceforge.n...
#!/usr/bin/python # -*- coding: utf-8 -*- #-----------------------------------# #Author: Robin David #Matriculation: 10014500 #License: Creative Commons #-----------------------------------# import string from impacket import ImpactDecoder, ImpactPacket #packet manipulation module from datetime import datetime from packet_function import * import re #regular expression from email.parser import Parser #parser for mime type ! class MSN: def __init__(self,ip, parano=False): self.local_ip = ip self.ip_packet = list() self.tcp_packet = list() self.paranoid_mode = parano #will determine if all tcp packets are examined or just dst.port=1863 def analyse(self, data): if isIP(data): self.ip_packet = getIPPacket(data) else: return #jump early to do not slow down traffic if isUDP(self.ip_packet): udp_packet = getTCPorUDPPacket(self.ip_packet) if getDstPortUDP(udp_packet) == 53: data = udp_packet.get_data_as_string() if re.search("login.live.com", data): print datetime.now().strftime("%b %d, %H:%M:%S")," DNS request for login.live.com" elif re.search("messenger.hotmail.com", data): print datetime.now().strftime("%b %d, %H:%M:%S")," DNS request for messenger.hotmail.com" elif re.search("g.live.com", data): print datetime.now().strftime("%b %d, %H:%M:%S")," DNS request for g.live.com" pass elif isTCP(self.ip_packet): #All packet from here are normally TCP ! 
self.tcp_packet = getTCPorUDPPacket(self.ip_packet) if self.tcp_packet.get_th_dport() == 80 or self.tcp_packet.get_th_sport() == 80: return #Directly skip all http traffic to avoid useless processing on it because we are not interested in elif self.tcp_packet.get_th_dport() == 443 and getDecFlagsValue(self.tcp_packet) == 2: #If we try to connect on a website in https, syn flags to trigger alarm once per connection(if it is) if self.test_https_auth(): print datetime.now().strftime("%b %d, %H:%M:%S")," Connection on a HTTPS MSN Server (maybe to pick up a authentication ticket)" elif self.tcp_packet.get_th_dport() == 1863 or self.tcp_packet.get_th_sport() == 1863: self.test_msn_connection() self.test_file_transfert() elif self.paranoid_mode: self.test_file_transfert() else: pass else: pass def test_https_auth(self): dst_ip = getDstIp(self.ip_packet) msn_servers = ['65.54.165.137','65.54.165.141','65.54.165.139','65.54.165.169','65.54.165.179','65.54.186.77','65.54.165.136','65.54.165.177'] isMSNserver = False #determined with an nslookup should be updated if needed for ip in msn_servers: if dst_ip == ip: isMSNserver = True return isMSNserver def test_msn_connection(self): data = self.tcp_packet.get_data_as_string() if getDecFlagsValue(self.tcp_packet) == 2: print "\n",datetime.now().strftime("%b %d, %H:%M:%S")," New Connection on the TCP 1863 (which can be a MSN notification server)!" 
if re.match("VER 1 ",data): #VER 1 MSNP21 MSNP20 MSNP19 MSNP18 MSNP17 CVR0 #VER 1 MSNP21 if getSrcIp(self.ip_packet) == self.local_ip: print datetime.now().strftime("%b %d, %H:%M:%S")," VER Step detected Client->Server (Protocol version exchange)" else: print datetime.now().strftime("%b %d, %H:%M:%S")," VER Step detected Server->Client (Version %s used)" % (data[6:12]) elif re.match("CVR 2 ",data): #CVR 2 0x0409 winnt 6.1.0 i386 MSNMSGR 15.4.3502.0922 MSNMSGR me@hotmail.fr #VmVyc2lvbjogMQ0KWGZyQ291bnQ6IDENCg== if getSrcIp(self.ip_packet) == self.local_ip: print datetime.now().strftime("%b %d, %H:%M:%S")," CVR Step detected Client->Server (client information sent)" infos = data[6:].split(" ") print "\t\tLocale:%s\tOS:%s(%s)\tArchi:%s\tClient:%s(%s)\tAddress:%s" % (infos[0],infos[1],infos[2],infos[3],infos[4],infos[5],infos[7]) #Further about locale :http://krafft.com/scripts/deluxe-calendar/lcid_chart.htm else: print datetime.now().strftime("%b %d, %H:%M:%S")," CVR Step detected Server->Client (Stored client information received)" elif re.match("USR 3 ",data): #USR 3 SSO I me@hotmail.fr #alert user send the initiation message of authentication infos = data.split(" ") print datetime.now().strftime("%b %d, %H:%M:%S")," USR Step detected (Initiation authentication) method:%s\tAddress:%s" % (infos[2],infos[4]) elif re.match("USR 4 ",data) and not re.match("USR 4 OK ",data): # USR 4 SSO S t=E (s for subsequent and the ticket attached) #Note SSO was not used to match because I don't know but, may other methods exists print datetime.now().strftime("%b %d, %H:%M:%S")," USR Step detected (Ticket/Token dispatching)" elif re.match("USR 4 OK ",data): #USR 4 OK me@hotmail.fr 1 0 infos= data.split(" ") print datetime.now().strftime("%b %d, %H:%M:%S")," Address %s connected and authenticated to msn !" 
% (infos[3]) def test_file_transfert(self): data = self.tcp_packet.get_data_as_string() new_data = ["",data] if re.search("INVITE",data):#keep only invitation message notFound = True while notFound: #separate first line from the rest and delete useless \n before new_data = new_data[1].split("\n",1) if re.search("INVITE",new_data[0]): notFound = False else: pass #First element in the array contain invitation message #The second contain mime elements #should do this, because otherwise the mime parsing fail due to the first line which is not mime if len(new_data) >= 2:#otherwise parsing below can fail due to out of range new_data[1] = re.sub(r'\r\n\r\n','\r\n',new_data[1]) mime_elts = Parser().parsestr(new_data[1],True) # parse the packet as mime type (True means ignore payload) #So to get payload remove True, and it is accessible with mime_elts.get_payload() if mime_elts['EUF-GUID'] == "{5D3E02AB-6190-11D3-BBBB-00C04F795683}": #This is signature of file transfert ! if re.search("INVITE",new_data[0]):#if it was really an invitation print "\n",datetime.now().strftime("%b %d, %H:%M:%S")," File transfer invitation !" print "\t\tFrom:%s\n\t\tTo:%s" % (mime_elts['From'],mime_elts['To']) #additional tests if mime_elts['CSeq'] == "0 " or mime_elts['CSeq'] == "0":#windows live put a space other client not print "\t\tAnymore CSeq = 0 (file transfer signature)" if mime_elts['appID'] == '2': print "\t\tAnymore AppID = 2 (file transfer signature)" if mime_elts['Content-Type'] == "application/x-msnmsgr-sessionreqbody": print "\t\tAnymore Content-Type = application/x-msnmsgr-sessionreqbody (file transfer signature)\n"
[ [ 1, 0, 0.0544, 0.0068, 0, 0.66, 0, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 1, 0, 0.0612, 0.0068, 0, 0.66, 0.1667, 752, 0, 2, 0, 0, 752, 0, 0 ], [ 1, 0, 0.068, 0.0068, 0, 0...
[ "import string", "from impacket import ImpactDecoder, ImpactPacket #packet manipulation module", "from datetime import datetime", "from packet_function import *", "import re #regular expression", "from email.parser import Parser #parser for mime type !", "class MSN:\n\tdef __init__(self,ip, parano=False...
#!/usr/bin/python # -*- coding: utf-8 -*- #-----------------------------------# #Author: Robin David #Matriculation: 10014500 #License: Creative Commons #-----------------------------------# import string from impacket import ImpactDecoder, ImpactPacket #packet manipulation module from datetime import datetime from packet_function import * import re #regular expression from email.parser import Parser #parser for mime type ! class MSN: def __init__(self,ip, parano=False): self.local_ip = ip self.ip_packet = list() self.tcp_packet = list() self.paranoid_mode = parano #will determine if all tcp packets are examined or just dst.port=1863 def analyse(self, data): if isIP(data): self.ip_packet = getIPPacket(data) else: return #jump early to do not slow down traffic if isUDP(self.ip_packet): udp_packet = getTCPorUDPPacket(self.ip_packet) if getDstPortUDP(udp_packet) == 53: data = udp_packet.get_data_as_string() if re.search("login.live.com", data): print datetime.now().strftime("%b %d, %H:%M:%S")," DNS request for login.live.com" elif re.search("messenger.hotmail.com", data): print datetime.now().strftime("%b %d, %H:%M:%S")," DNS request for messenger.hotmail.com" elif re.search("g.live.com", data): print datetime.now().strftime("%b %d, %H:%M:%S")," DNS request for g.live.com" pass elif isTCP(self.ip_packet): #All packet from here are normally TCP ! 
self.tcp_packet = getTCPorUDPPacket(self.ip_packet) if self.tcp_packet.get_th_dport() == 80 or self.tcp_packet.get_th_sport() == 80: return #Directly skip all http traffic to avoid useless processing on it because we are not interested in elif self.tcp_packet.get_th_dport() == 443 and getDecFlagsValue(self.tcp_packet) == 2: #If we try to connect on a website in https, syn flags to trigger alarm once per connection(if it is) if self.test_https_auth(): print datetime.now().strftime("%b %d, %H:%M:%S")," Connection on a HTTPS MSN Server (maybe to pick up a authentication ticket)" elif self.tcp_packet.get_th_dport() == 1863 or self.tcp_packet.get_th_sport() == 1863: self.test_msn_connection() self.test_file_transfert() elif self.paranoid_mode: self.test_file_transfert() else: pass else: pass def test_https_auth(self): dst_ip = getDstIp(self.ip_packet) msn_servers = ['65.54.165.137','65.54.165.141','65.54.165.139','65.54.165.169','65.54.165.179','65.54.186.77','65.54.165.136','65.54.165.177'] isMSNserver = False #determined with an nslookup should be updated if needed for ip in msn_servers: if dst_ip == ip: isMSNserver = True return isMSNserver def test_msn_connection(self): data = self.tcp_packet.get_data_as_string() if getDecFlagsValue(self.tcp_packet) == 2: print "\n",datetime.now().strftime("%b %d, %H:%M:%S")," New Connection on the TCP 1863 (which can be a MSN notification server)!" 
if re.match("VER 1 ",data): #VER 1 MSNP21 MSNP20 MSNP19 MSNP18 MSNP17 CVR0 #VER 1 MSNP21 if getSrcIp(self.ip_packet) == self.local_ip: print datetime.now().strftime("%b %d, %H:%M:%S")," VER Step detected Client->Server (Protocol version exchange)" else: print datetime.now().strftime("%b %d, %H:%M:%S")," VER Step detected Server->Client (Version %s used)" % (data[6:12]) elif re.match("CVR 2 ",data): #CVR 2 0x0409 winnt 6.1.0 i386 MSNMSGR 15.4.3502.0922 MSNMSGR me@hotmail.fr #VmVyc2lvbjogMQ0KWGZyQ291bnQ6IDENCg== if getSrcIp(self.ip_packet) == self.local_ip: print datetime.now().strftime("%b %d, %H:%M:%S")," CVR Step detected Client->Server (client information sent)" infos = data[6:].split(" ") print "\t\tLocale:%s\tOS:%s(%s)\tArchi:%s\tClient:%s(%s)\tAddress:%s" % (infos[0],infos[1],infos[2],infos[3],infos[4],infos[5],infos[7]) #Further about locale :http://krafft.com/scripts/deluxe-calendar/lcid_chart.htm else: print datetime.now().strftime("%b %d, %H:%M:%S")," CVR Step detected Server->Client (Stored client information received)" elif re.match("USR 3 ",data): #USR 3 SSO I me@hotmail.fr #alert user send the initiation message of authentication infos = data.split(" ") print datetime.now().strftime("%b %d, %H:%M:%S")," USR Step detected (Initiation authentication) method:%s\tAddress:%s" % (infos[2],infos[4]) elif re.match("USR 4 ",data) and not re.match("USR 4 OK ",data): # USR 4 SSO S t=E (s for subsequent and the ticket attached) #Note SSO was not used to match because I don't know but, may other methods exists print datetime.now().strftime("%b %d, %H:%M:%S")," USR Step detected (Ticket/Token dispatching)" elif re.match("USR 4 OK ",data): #USR 4 OK me@hotmail.fr 1 0 infos= data.split(" ") print datetime.now().strftime("%b %d, %H:%M:%S")," Address %s connected and authenticated to msn !" 
% (infos[3]) def test_file_transfert(self): data = self.tcp_packet.get_data_as_string() new_data = ["",data] if re.search("INVITE",data):#keep only invitation message notFound = True while notFound: #separate first line from the rest and delete useless \n before new_data = new_data[1].split("\n",1) if re.search("INVITE",new_data[0]): notFound = False else: pass #First element in the array contain invitation message #The second contain mime elements #should do this, because otherwise the mime parsing fail due to the first line which is not mime if len(new_data) >= 2:#otherwise parsing below can fail due to out of range new_data[1] = re.sub(r'\r\n\r\n','\r\n',new_data[1]) mime_elts = Parser().parsestr(new_data[1],True) # parse the packet as mime type (True means ignore payload) #So to get payload remove True, and it is accessible with mime_elts.get_payload() if mime_elts['EUF-GUID'] == "{5D3E02AB-6190-11D3-BBBB-00C04F795683}": #This is signature of file transfert ! if re.search("INVITE",new_data[0]):#if it was really an invitation print "\n",datetime.now().strftime("%b %d, %H:%M:%S")," File transfer invitation !" print "\t\tFrom:%s\n\t\tTo:%s" % (mime_elts['From'],mime_elts['To']) #additional tests if mime_elts['CSeq'] == "0 " or mime_elts['CSeq'] == "0":#windows live put a space other client not print "\t\tAnymore CSeq = 0 (file transfer signature)" if mime_elts['appID'] == '2': print "\t\tAnymore AppID = 2 (file transfer signature)" if mime_elts['Content-Type'] == "application/x-msnmsgr-sessionreqbody": print "\t\tAnymore Content-Type = application/x-msnmsgr-sessionreqbody (file transfer signature)\n"
[ [ 1, 0, 0.0544, 0.0068, 0, 0.66, 0, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 1, 0, 0.0612, 0.0068, 0, 0.66, 0.1667, 752, 0, 2, 0, 0, 752, 0, 0 ], [ 1, 0, 0.068, 0.0068, 0, 0...
[ "import string", "from impacket import ImpactDecoder, ImpactPacket #packet manipulation module", "from datetime import datetime", "from packet_function import *", "import re #regular expression", "from email.parser import Parser #parser for mime type !", "class MSN:\n\tdef __init__(self,ip, parano=False...
#!/usr/bin/python # -*- coding: utf-8 -*- #-----------------------------------# #Author: Robin David #Matriculation: 10014500 #License: Creative Commons #-----------------------------------# import string from impacket import ImpactDecoder, ImpactPacket #packet manipulation module from datetime import datetime import time import re #regular expression from email.parser import Parser #parser for mime type ! from packet_function import * class BBC: def __init__(self): self.ip_packet = list() self.tcp_packet = list() self.lastmessage = "" def analyse(self, data): if isIP(data): self.ip_packet = getIPPacket(data) if isTCP(self.ip_packet): self.tcp_packet = getTCPorUDPPacket(self.ip_packet) if self.tcp_packet.get_th_dport() == 1935: data = self.tcp_packet.get_data_as_string() if re.search("www..?bbc.c.?o.uk",data): #if pattern found then try to pick up path of stream url_path = re.findall("(?!www\.bbc\/c.?o\.uk).*www\..?bbc\.co\.uk(.*)...$",data)[0] ip = getSrcIp(self.ip_packet) mess = "IP:%s\t Stream:www.bbc.co.uk%s" % (ip,url_path) if mess != self.lastmessage: #avoid redundancy of message for same request and same ip print datetime.now().strftime("%b %d, %H:%M:%S"),mess self.lastmessage = mess
[ [ 1, 0, 0.2, 0.025, 0, 0.66, 0, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 1, 0, 0.225, 0.025, 0, 0.66, 0.1429, 752, 0, 2, 0, 0, 752, 0, 0 ], [ 1, 0, 0.25, 0.025, 0, 0.66, ...
[ "import string", "from impacket import ImpactDecoder, ImpactPacket #packet manipulation module", "from datetime import datetime", "import time", "import re #regular expression", "from email.parser import Parser #parser for mime type !", "from packet_function import *", "class BBC:\n\tdef __init__(self...
#!/usr/bin/python # -*- coding: utf-8 -*- #-----------------------------------# #Author: Robin David #Matriculation: 10014500 #License: Creative Commons #-----------------------------------# import string from impacket import ImpactDecoder, ImpactPacket #packet manipulation module from datetime import datetime import time from packet_function import * import re #regular expression from email.parser import Parser #parser for mime type ! class Triplet: def __init__(self, port_nb): self.port = port_nb self.packet_list = list() #list of the date of the reception of the 3 packets ! def getPortName(self): return self.port def getIndex(self): return len(self.packet_list)-1 def addElement(self,time): if len(self.packet_list) < 3: self.packet_list.append(time) def getElement(self,i): return self.packet_list[i] class SuspectIP: def __init__(self,ip,time): self.ip = ip self.nbpacket = 0 self.triplet_list = list() #List of triplet (normally maximum 10 triplets (3*10)) self.firstpacket_received = time self.lastpacket_received = 0 def getIP(self): return self.ip def getLastPacketDate(self): return self.lastpacket_received def addPacket(self,port,time_recep): self.lastpacket_received = time_recep exist = False for i in range(len(self.triplet_list)): #check all element of the list of suspect if self.triplet_list[i].getPortName() == port: #if already in the list exist = True cur = self.triplet_list[i] cur.addElement(time_recep) #add element to the triplet index = cur.getIndex() self.nbpacket += 1 print datetime.now().strftime("%b %d, %H:%M:%S"),"IP:%s Port:%s(%s/3) BOTNET packet Date:%s(difference:%s) Total count:%s" % (self.ip,cur.getPortName(),index+1, datetime.fromtimestamp(cur.getElement(index)).strftime("%H:%M:%S"), cur.getElement(index) - cur.getElement(index-1),self.nbpacket) if self.nbpacket == 30: print datetime.now().strftime("%b %d, %H:%M:%S"),"IP:%s all packet from botnet signature detected in %s seconds" % (self.ip, self.lastpacket_received - self.firstpacket_received) 
if not exist: new = Triplet(port) #Creation of the new suspect and push it in the list new.addElement(time_recep) self.nbpacket += 1 self.triplet_list.append(new) print datetime.now().strftime("%b %d, %H:%M:%S"),"IP:%s Port:%s(1/3) BOTNET packet Date:%s Total count:%s" % (self.ip,port,datetime.fromtimestamp(time_recep).strftime("%H:%M:%S"),self.nbpacket) #--------------------# # Class Botnet # #--------------------# class botnet: def __init__(self): self.ip_packet = list() self.tcp_packet = list() self.suspect_list = list() def analyse(self, data,time_recep): if isIP(data): self.ip_packet = getIPPacket(data) srcip = getSrcIp(self.ip_packet) if isTCP(self.ip_packet): self.tcp_packet = getTCPorUDPPacket(self.ip_packet) if self.tcp_packet.get_th_dport() == 1013 and getDstIp(self.ip_packet) == "192.168.5.13" and getDecFlagsValue(self.tcp_packet) == 2: #if the packet matches all requirements self.processPacket(srcip,self.tcp_packet.get_th_sport(),time_recep) self.update_list() def processPacket(self, ip,port,rec_time): exist = False for suspect in self.suspect_list: if suspect.getIP() == ip: exist = True suspect.addPacket(port,rec_time) if not exist: new = SuspectIP(ip,rec_time) new.addPacket(port,rec_time) self.suspect_list.append(new) def update_list(self): for i in range(len(self.suspect_list)): if (time.time() - self.suspect_list[i].getLastPacketDate()) > 240: #if the last packet of the host is older than 4 minutes print self.suspect_list[i].getIP()," reseted (botnet)" del self.suspect_list[i]
[ [ 1, 0, 0.0727, 0.0091, 0, 0.66, 0, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 1, 0, 0.0818, 0.0091, 0, 0.66, 0.1111, 752, 0, 2, 0, 0, 752, 0, 0 ], [ 1, 0, 0.0909, 0.0091, 0, ...
[ "import string", "from impacket import ImpactDecoder, ImpactPacket #packet manipulation module", "from datetime import datetime", "import time", "from packet_function import *", "import re #regular expression", "from email.parser import Parser #parser for mime type !", "class Triplet:\n\tdef __init__(...
#!/usr/bin/python # -*- coding: utf-8 -*- #-----------------------------------# #Author: Robin David #Matriculation: 10014500 #License: Creative Commons #-----------------------------------# import string from impacket import ImpactDecoder, ImpactPacket #packet manipulation module from datetime import datetime import time from packet_function import * import re #regular expression from email.parser import Parser #parser for mime type ! class Triplet: def __init__(self, port_nb): self.port = port_nb self.packet_list = list() #list of the date of the reception of the 3 packets ! def getPortName(self): return self.port def getIndex(self): return len(self.packet_list)-1 def addElement(self,time): if len(self.packet_list) < 3: self.packet_list.append(time) def getElement(self,i): return self.packet_list[i] class SuspectIP: def __init__(self,ip,time): self.ip = ip self.nbpacket = 0 self.triplet_list = list() #List of triplet (normally maximum 10 triplets (3*10)) self.firstpacket_received = time self.lastpacket_received = 0 def getIP(self): return self.ip def getLastPacketDate(self): return self.lastpacket_received def addPacket(self,port,time_recep): self.lastpacket_received = time_recep exist = False for i in range(len(self.triplet_list)): #check all element of the list of suspect if self.triplet_list[i].getPortName() == port: #if already in the list exist = True cur = self.triplet_list[i] cur.addElement(time_recep) #add element to the triplet index = cur.getIndex() self.nbpacket += 1 print datetime.now().strftime("%b %d, %H:%M:%S"),"IP:%s Port:%s(%s/3) BOTNET packet Date:%s(difference:%s) Total count:%s" % (self.ip,cur.getPortName(),index+1, datetime.fromtimestamp(cur.getElement(index)).strftime("%H:%M:%S"), cur.getElement(index) - cur.getElement(index-1),self.nbpacket) if self.nbpacket == 30: print datetime.now().strftime("%b %d, %H:%M:%S"),"IP:%s all packet from botnet signature detected in %s seconds" % (self.ip, self.lastpacket_received - self.firstpacket_received) 
if not exist: new = Triplet(port) #Creation of the new suspect and push it in the list new.addElement(time_recep) self.nbpacket += 1 self.triplet_list.append(new) print datetime.now().strftime("%b %d, %H:%M:%S"),"IP:%s Port:%s(1/3) BOTNET packet Date:%s Total count:%s" % (self.ip,port,datetime.fromtimestamp(time_recep).strftime("%H:%M:%S"),self.nbpacket) #--------------------# # Class Botnet # #--------------------# class botnet: def __init__(self): self.ip_packet = list() self.tcp_packet = list() self.suspect_list = list() def analyse(self, data,time_recep): if isIP(data): self.ip_packet = getIPPacket(data) srcip = getSrcIp(self.ip_packet) if isTCP(self.ip_packet): self.tcp_packet = getTCPorUDPPacket(self.ip_packet) if self.tcp_packet.get_th_dport() == 1013 and getDstIp(self.ip_packet) == "192.168.5.13" and getDecFlagsValue(self.tcp_packet) == 2: #if the packet matches all requirements self.processPacket(srcip,self.tcp_packet.get_th_sport(),time_recep) self.update_list() def processPacket(self, ip,port,rec_time): exist = False for suspect in self.suspect_list: if suspect.getIP() == ip: exist = True suspect.addPacket(port,rec_time) if not exist: new = SuspectIP(ip,rec_time) new.addPacket(port,rec_time) self.suspect_list.append(new) def update_list(self): for i in range(len(self.suspect_list)): if (time.time() - self.suspect_list[i].getLastPacketDate()) > 240: #if the last packet of the host is older than 4 minutes print self.suspect_list[i].getIP()," reseted (botnet)" del self.suspect_list[i]
[ [ 1, 0, 0.0727, 0.0091, 0, 0.66, 0, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 1, 0, 0.0818, 0.0091, 0, 0.66, 0.1111, 752, 0, 2, 0, 0, 752, 0, 0 ], [ 1, 0, 0.0909, 0.0091, 0, ...
[ "import string", "from impacket import ImpactDecoder, ImpactPacket #packet manipulation module", "from datetime import datetime", "import time", "from packet_function import *", "import re #regular expression", "from email.parser import Parser #parser for mime type !", "class Triplet:\n\tdef __init__(...
#!/usr/bin/python # -*- coding: utf-8 -*- #-----------------------------------# #Author: Robin David #Matriculation: 10014500 #License: Creative Commons #-----------------------------------# import string from impacket import ImpactDecoder, ImpactPacket #packet manipulation module from datetime import datetime import time import re #regular expression from email.parser import Parser #parser for mime type ! from packet_function import * class BBC: def __init__(self): self.ip_packet = list() self.tcp_packet = list() self.lastmessage = "" def analyse(self, data): if isIP(data): self.ip_packet = getIPPacket(data) if isTCP(self.ip_packet): self.tcp_packet = getTCPorUDPPacket(self.ip_packet) if self.tcp_packet.get_th_dport() == 1935: data = self.tcp_packet.get_data_as_string() if re.search("www..?bbc.c.?o.uk",data): #if pattern found then try to pick up path of stream url_path = re.findall("(?!www\.bbc\/c.?o\.uk).*www\..?bbc\.co\.uk(.*)...$",data)[0] ip = getSrcIp(self.ip_packet) mess = "IP:%s\t Stream:www.bbc.co.uk%s" % (ip,url_path) if mess != self.lastmessage: #avoid redundancy of message for same request and same ip print datetime.now().strftime("%b %d, %H:%M:%S"),mess self.lastmessage = mess
[ [ 1, 0, 0.2, 0.025, 0, 0.66, 0, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 1, 0, 0.225, 0.025, 0, 0.66, 0.1429, 752, 0, 2, 0, 0, 752, 0, 0 ], [ 1, 0, 0.25, 0.025, 0, 0.66, ...
[ "import string", "from impacket import ImpactDecoder, ImpactPacket #packet manipulation module", "from datetime import datetime", "import time", "import re #regular expression", "from email.parser import Parser #parser for mime type !", "from packet_function import *", "class BBC:\n\tdef __init__(self...
#!/usr/bin/python # Copyright 2011 Google, Inc. All Rights Reserved. # simple script to walk source tree looking for third-party licenses # dumps resulting html page to stdout import os, re, mimetypes, sys # read source directories to scan from command line SOURCE = sys.argv[1:] # regex to find /* */ style comment blocks COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL) # regex used to detect if comment block is a license COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE) COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE) EXCLUDE_TYPES = [ "application/xml", "image/png", ] # list of known licenses; keys are derived by stripping all whitespace and # forcing to lowercase to help combine multiple files that have same license. KNOWN_LICENSES = {} class License: def __init__(self, license_text): self.license_text = license_text self.filenames = [] # add filename to the list of files that have the same license text def add_file(self, filename): if filename not in self.filenames: self.filenames.append(filename) LICENSE_KEY = re.compile(r"[^\w]") def find_license(license_text): # TODO(alice): a lot these licenses are almost identical Apache licenses. # Most of them differ in origin/modifications. Consider combining similar # licenses. 
license_key = LICENSE_KEY.sub("", license_text).lower() if license_key not in KNOWN_LICENSES: KNOWN_LICENSES[license_key] = License(license_text) return KNOWN_LICENSES[license_key] def discover_license(exact_path, filename): # when filename ends with LICENSE, assume applies to filename prefixed if filename.endswith("LICENSE"): with open(exact_path) as file: license_text = file.read() target_filename = filename[:-len("LICENSE")] if target_filename.endswith("."): target_filename = target_filename[:-1] find_license(license_text).add_file(target_filename) return None # try searching for license blocks in raw file mimetype = mimetypes.guess_type(filename) if mimetype in EXCLUDE_TYPES: return None with open(exact_path) as file: raw_file = file.read() # include comments that have both "license" and "copyright" in the text for comment in COMMENT_BLOCK.finditer(raw_file): comment = comment.group(1) if COMMENT_LICENSE.search(comment) is None: continue if COMMENT_COPYRIGHT.search(comment) is None: continue find_license(comment).add_file(filename) for source in SOURCE: for root, dirs, files in os.walk(source): for name in files: discover_license(os.path.join(root, name), name) print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>" for license in KNOWN_LICENSES.values(): print "<h3>Notices for files:</h3><ul>" filenames = license.filenames filenames.sort() for filename in filenames: print "<li>%s</li>" % (filename) print "</ul>" print "<pre>%s</pre>" % license.license_text print "</body></html>"
[ [ 1, 0, 0.0816, 0.0102, 0, 0.66, 0, 688, 0, 4, 0, 0, 688, 0, 0 ], [ 14, 0, 0.1224, 0.0102, 0, 0.66, 0.0714, 792, 6, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1531, 0.0102, 0, ...
[ "import os, re, mimetypes, sys", "SOURCE = sys.argv[1:]", "COMMENT_BLOCK = re.compile(r\"(/\\*.+?\\*/)\", re.MULTILINE | re.DOTALL)", "COMMENT_LICENSE = re.compile(r\"(license)\", re.IGNORECASE)", "COMMENT_COPYRIGHT = re.compile(r\"(copyright)\", re.IGNORECASE)", "EXCLUDE_TYPES = [\n \"application/xml\...
#! /usr/bin/python # $Id: testupnpigd.py,v 1.4 2008/10/11 10:27:20 nanard Exp $ # MiniUPnP project # Author : Thomas Bernard # This Sample code is public domain. # website : http://miniupnp.tuxfamily.org/ # import the python miniupnpc module import miniupnpc import socket import BaseHTTPServer # function definition def list_redirections(): i = 0 while True: p = u.getgenericportmapping(i) if p==None: break print i, p i = i + 1 #define the handler class for HTTP connections class handler_class(BaseHTTPServer.BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write("OK MON GARS") # create the object u = miniupnpc.UPnP() #print 'inital(default) values :' #print ' discoverdelay', u.discoverdelay #print ' lanaddr', u.lanaddr #print ' multicastif', u.multicastif #print ' minissdpdsocket', u.minissdpdsocket u.discoverdelay = 200; try: print 'Discovering... delay=%ums' % u.discoverdelay ndevices = u.discover() print ndevices, 'device(s) detected' # select an igd u.selectigd() # display information about the IGD and the internet connection print 'local ip address :', u.lanaddr externalipaddress = u.externalipaddress() print 'external ip address :', externalipaddress print u.statusinfo(), u.connectiontype() #instanciate a HTTPd object. The port is assigned by the system. httpd = BaseHTTPServer.HTTPServer((u.lanaddr, 0), handler_class) eport = httpd.server_port # find a free port for the redirection r = u.getspecificportmapping(eport, 'TCP') while r != None and eport < 65536: eport = eport + 1 r = u.getspecificportmapping(eport, 'TCP') print 'trying to redirect %s port %u TCP => %s port %u TCP' % (externalipaddress, eport, u.lanaddr, httpd.server_port) b = u.addportmapping(eport, 'TCP', u.lanaddr, httpd.server_port, 'UPnP IGD Tester port %u' % eport, '') if b: print 'Success. 
Now waiting for some HTTP request on http://%s:%u' % (externalipaddress ,eport) try: httpd.handle_request() httpd.server_close() except KeyboardInterrupt, details: print "CTRL-C exception!", details b = u.deleteportmapping(eport, 'TCP') if b: print 'Successfully deleted port mapping' else: print 'Failed to remove port mapping' else: print 'Failed' httpd.server_close() except Exception, e: print 'Exception :', e
[ [ 1, 0, 0.0526, 0.0526, 0, 0.66, 0, 772, 0, 1, 0, 0, 772, 0, 0 ], [ 1, 0, 0.1053, 0.0526, 0, 0.66, 0.25, 687, 0, 1, 0, 0, 687, 0, 0 ], [ 1, 0, 0.1579, 0.0526, 0, 0....
[ "import miniupnpc", "import socket", "import BaseHTTPServer", "def list_redirections():\n\ti = 0\n\twhile True:\n\t\tp = u.getgenericportmapping(i)\n\t\tif p==None:\n\t\t\tbreak\n\t\tprint(i, p)\n\t\ti = i + 1", "\ti = 0", "\twhile True:\n\t\tp = u.getgenericportmapping(i)\n\t\tif p==None:\n\t\t\tbreak\n\...
#! /usr/bin/python
# $Id: setup.py,v 1.3 2009/04/17 20:59:42 nanard Exp $
# the MiniUPnP Project (c) 2007 Thomas Bernard
# http://miniupnp.tuxfamily.org/ or http://miniupnp.free.fr/
#
# python script to build the miniupnpc module under unix
#
# replace libminiupnpc.a by libminiupnpc.so for shared library usage

from distutils.core import setup, Extension

# Single C extension statically linked against the miniupnpc library.
setup(
    name="miniupnpc",
    version="1.3",
    ext_modules=[
        Extension(
            name="miniupnpc",
            sources=["miniupnpcmodule.c"],
            extra_objects=["libminiupnpc.a"],
        ),
    ],
)
[ [ 1, 0, 0.6, 0.0667, 0, 0.66, 0, 152, 0, 2, 0, 0, 152, 0, 0 ], [ 8, 0, 0.8, 0.3333, 0, 0.66, 1, 234, 3, 3, 0, 0, 0, 0, 2 ] ]
[ "from distutils.core import setup, Extension", "setup(name=\"miniupnpc\", version=\"1.3\",\n ext_modules=[\n\t Extension(name=\"miniupnpc\", sources=[\"miniupnpcmodule.c\"],\n\t\t\t extra_objects=[\"libminiupnpc.a\"])\n\t\t\t ])" ]
#! /usr/bin/python
# $Id: setupmingw32.py,v 1.1 2007/06/12 23:04:13 nanard Exp $
# the MiniUPnP Project (c) 2007 Thomas Bernard
# http://miniupnp.tuxfamily.org/ or http://miniupnp.free.fr/
#
# python script to build the miniupnpc module under unix
#

from distutils.core import setup, Extension

# Windows (mingw32) build: same extension as setup.py but linked
# against the ws2_32 winsock library as well.
setup(
    name="miniupnpc",
    version="1.0-RC6",
    ext_modules=[
        Extension(
            name="miniupnpc",
            sources=["miniupnpcmodule.c"],
            libraries=["ws2_32"],
            extra_objects=["libminiupnpc.a"],
        ),
    ],
)
[ [ 1, 0, 0.5333, 0.0667, 0, 0.66, 0, 152, 0, 2, 0, 0, 152, 0, 0 ], [ 8, 0, 0.7667, 0.4, 0, 0.66, 1, 234, 3, 3, 0, 0, 0, 0, 2 ] ]
[ "from distutils.core import setup, Extension", "setup(name=\"miniupnpc\", version=\"1.0-RC6\",\n ext_modules=[\n\t Extension(name=\"miniupnpc\", sources=[\"miniupnpcmodule.c\"],\n\t libraries=[\"ws2_32\"],\n\t\t\t extra_objects=[\"libminiupnpc.a\"])\n\t\t\t ])" ]
#! /usr/bin/python # MiniUPnP project # Author : Thomas Bernard # This Sample code is public domain. # website : http://miniupnp.tuxfamily.org/ # import the python miniupnpc module import miniupnpc import sys # create the object u = miniupnpc.UPnP() print 'inital(default) values :' print ' discoverdelay', u.discoverdelay print ' lanaddr', u.lanaddr print ' multicastif', u.multicastif print ' minissdpdsocket', u.minissdpdsocket u.discoverdelay = 200; #u.minissdpdsocket = '../minissdpd/minissdpd.sock' # discovery process, it usualy takes several seconds (2 seconds or more) print 'Discovering... delay=%ums' % u.discoverdelay print u.discover(), 'device(s) detected' # select an igd try: u.selectigd() except Exception, e: print 'Exception :', e sys.exit(1) # display information about the IGD and the internet connection print 'local ip address :', u.lanaddr print 'external ip address :', u.externalipaddress() print u.statusinfo(), u.connectiontype() #print u.addportmapping(64000, 'TCP', # '192.168.1.166', 63000, 'port mapping test', '') #print u.deleteportmapping(64000, 'TCP') port = 0 proto = 'UDP' # list the redirections : i = 0 while True: p = u.getgenericportmapping(i) if p==None: break print i, p (port, proto, (ihost,iport), desc, c, d, e) = p #print port, desc i = i + 1 print u.getspecificportmapping(port, proto)
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 772, 0, 1, 0, 0, 772, 0, 0 ], [ 1, 0, 0.6667, 0.3333, 0, 0.66, 1, 509, 0, 1, 0, 0, 509, 0, 0 ] ]
[ "import miniupnpc", "import sys" ]
#!/usr/bin/python2.4 # # Copyright 2007 The Python-Twitter Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys # parse_qsl moved to urlparse module in v2.6 try: from urlparse import parse_qsl except: from cgi import parse_qsl import oauth2 as oauth REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token' ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token' AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize' SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate' consumer_key = None consumer_secret = None if consumer_key is None or consumer_secret is None: print 'You need to edit this script and provide values for the' print 'consumer_key and also consumer_secret.' print '' print 'The values you need come from Twitter - you need to register' print 'as a developer your "application". This is needed only until' print 'Twitter finishes the idea they have of a way to allow open-source' print 'based libraries to have a token that can be used to generate a' print 'one-time use key that will allow the library to make the request' print 'on your behalf.' 
print '' sys.exit(1) signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret) oauth_client = oauth.Client(oauth_consumer) print 'Requesting temp token from Twitter' resp, content = oauth_client.request(REQUEST_TOKEN_URL, 'GET') if resp['status'] != '200': print 'Invalid respond from Twitter requesting temp token: %s' % resp['status'] else: request_token = dict(parse_qsl(content)) print '' print 'Please visit this Twitter page and retrieve the pincode to be used' print 'in the next step to obtaining an Authentication Token:' print '' print '%s?oauth_token=%s' % (AUTHORIZATION_URL, request_token['oauth_token']) print '' pincode = raw_input('Pincode? ') token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret']) token.set_verifier(pincode) print '' print 'Generating and signing request for an access token' print '' oauth_client = oauth.Client(oauth_consumer, token) resp, content = oauth_client.request(ACCESS_TOKEN_URL, method='POST', body='oauth_callback=oob&oauth_verifier=%s' % pincode) access_token = dict(parse_qsl(content)) if resp['status'] != '200': print 'The request for a Token did not succeed: %s' % resp['status'] print access_token else: print 'Your Twitter Access Token key: %s' % access_token['oauth_token'] print ' Access Token secret: %s' % access_token['oauth_token_secret'] print ''
[ [ 1, 0, 0.1978, 0.011, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.2088, 0.011, 0, 0.66, 0.0625, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 7, 0, 0.2582, 0.044, 0, 0.6...
[ "import os", "import sys", "try:\n from urlparse import parse_qsl\nexcept:\n from cgi import parse_qsl", " from urlparse import parse_qsl", " from cgi import parse_qsl", "import oauth2 as oauth", "REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'", "ACCESS_TOKEN_URL = 'https://api...
#!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''A class that defines the default URL Shortener.

TinyURL is provided as the default and as an example.
'''

import urllib

# Change History
#
# 2010-05-16
#   TinyURL example and the idea for this comes from a bug filed by
#   acolorado with patch provided by ghills. Class implementation
#   was done by bear.
#
#   Issue 19 http://code.google.com/p/python-twitter/issues/detail?id=19
#


class ShortenURL(object):
    '''Helper class to make URL Shortener calls if/when required'''

    def __init__(self, userid=None, password=None):
        '''Instantiate a new ShortenURL object

        Args:
          userid:   userid for any required authorization call [optional]
          password: password for any required authorization call [optional]
        '''
        self.userid = userid
        self.password = password

    def Shorten(self, longURL):
        '''Call TinyURL API and return the shortened URL result

        Args:
          longURL: URL string to shorten

        Returns:
          The shortened URL as a string

        Note:
          longURL is required and no checks are made to ensure completeness
        '''
        handle = urllib.urlopen("http://tinyurl.com/api-create.php?url=%s" % longURL)
        try:
            shortened = handle.read()
        finally:
            handle.close()
        return shortened
[ [ 8, 0, 0.2606, 0.0563, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.3099, 0.0141, 0, 0.66, 0.5, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 3, 0, 0.7535, 0.507, 0, 0.66, ...
[ "'''A class that defines the default URL Shortener.\n\nTinyURL is provided as the default and as an example.\n'''", "import urllib", "class ShortenURL(object):\n '''Helper class to make URL Shortener calls if/when required'''\n def __init__(self,\n userid=None,\n password=N...
#!/usr/bin/python2.4 '''Post a message to twitter''' __author__ = 'dewitt@google.com' import ConfigParser import getopt import os import sys import twitter USAGE = '''Usage: tweet [options] message This script posts a message to Twitter. Options: -h --help : print this help --consumer-key : the twitter consumer key --consumer-secret : the twitter consumer secret --access-key : the twitter access token key --access-secret : the twitter access token secret --encoding : the character set encoding used in input strings, e.g. "utf-8". [optional] Documentation: If either of the command line flags are not present, the environment variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your consumer_key or consumer_secret, respectively. If neither the command line flags nor the enviroment variables are present, the .tweetrc file, if it exists, can be used to set the default consumer_key and consumer_secret. The file should contain the following three lines, replacing *consumer_key* with your consumer key, and *consumer_secret* with your consumer secret: A skeletal .tweetrc file: [Tweet] consumer_key: *consumer_key* consumer_secret: *consumer_password* access_key: *access_key* access_secret: *access_password* ''' def PrintUsageAndExit(): print USAGE sys.exit(2) def GetConsumerKeyEnv(): return os.environ.get("TWEETUSERNAME", None) def GetConsumerSecretEnv(): return os.environ.get("TWEETPASSWORD", None) def GetAccessKeyEnv(): return os.environ.get("TWEETACCESSKEY", None) def GetAccessSecretEnv(): return os.environ.get("TWEETACCESSSECRET", None) class TweetRc(object): def __init__(self): self._config = None def GetConsumerKey(self): return self._GetOption('consumer_key') def GetConsumerSecret(self): return self._GetOption('consumer_secret') def GetAccessKey(self): return self._GetOption('access_key') def GetAccessSecret(self): return self._GetOption('access_secret') def _GetOption(self, option): try: return self._GetConfig().get('Tweet', option) except: return None 
def _GetConfig(self): if not self._config: self._config = ConfigParser.ConfigParser() self._config.read(os.path.expanduser('~/.tweetrc')) return self._config def main(): try: shortflags = 'h' longflags = ['help', 'consumer-key=', 'consumer-secret=', 'access-key=', 'access-secret=', 'encoding='] opts, args = getopt.gnu_getopt(sys.argv[1:], shortflags, longflags) except getopt.GetoptError: PrintUsageAndExit() consumer_keyflag = None consumer_secretflag = None access_keyflag = None access_secretflag = None encoding = None for o, a in opts: if o in ("-h", "--help"): PrintUsageAndExit() if o in ("--consumer-key"): consumer_keyflag = a if o in ("--consumer-secret"): consumer_secretflag = a if o in ("--access-key"): access_keyflag = a if o in ("--access-secret"): access_secretflag = a if o in ("--encoding"): encoding = a message = ' '.join(args) if not message: PrintUsageAndExit() rc = TweetRc() consumer_key = consumer_keyflag or GetConsumerKeyEnv() or rc.GetConsumerKey() consumer_secret = consumer_secretflag or GetConsumerSecretEnv() or rc.GetConsumerSecret() access_key = access_keyflag or GetAccessKeyEnv() or rc.GetAccessKey() access_secret = access_secretflag or GetAccessSecretEnv() or rc.GetAccessSecret() if not consumer_key or not consumer_secret or not access_key or not access_secret: PrintUsageAndExit() api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token_key=access_key, access_token_secret=access_secret, input_encoding=encoding) try: status = api.PostUpdate(message) except UnicodeDecodeError: print "Your message could not be encoded. Perhaps it contains non-ASCII characters? " print "Try explicitly specifying the encoding with the --encoding flag" sys.exit(2) print "%s just posted: %s" % (status.user.name, status.text) if __name__ == "__main__": main()
[ [ 8, 0, 0.0213, 0.0071, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.0355, 0.0071, 0, 0.66, 0.0667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.0496, 0.0071, 0, 0.66,...
[ "'''Post a message to twitter'''", "__author__ = 'dewitt@google.com'", "import ConfigParser", "import getopt", "import os", "import sys", "import twitter", "USAGE = '''Usage: tweet [options] message\n\n This script posts a message to Twitter.\n\n Options:\n\n -h --help : print this help\n --co...
"""JSON token scanner """ import re try: from simplejson._speedups import make_scanner as c_make_scanner except ImportError: c_make_scanner = None __all__ = ['make_scanner'] NUMBER_RE = re.compile( r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', (re.VERBOSE | re.MULTILINE | re.DOTALL)) def py_make_scanner(context): parse_object = context.parse_object parse_array = context.parse_array parse_string = context.parse_string match_number = NUMBER_RE.match encoding = context.encoding strict = context.strict parse_float = context.parse_float parse_int = context.parse_int parse_constant = context.parse_constant object_hook = context.object_hook def _scan_once(string, idx): try: nextchar = string[idx] except IndexError: raise StopIteration if nextchar == '"': return parse_string(string, idx + 1, encoding, strict) elif nextchar == '{': return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook) elif nextchar == '[': return parse_array((string, idx + 1), _scan_once) elif nextchar == 'n' and string[idx:idx + 4] == 'null': return None, idx + 4 elif nextchar == 't' and string[idx:idx + 4] == 'true': return True, idx + 4 elif nextchar == 'f' and string[idx:idx + 5] == 'false': return False, idx + 5 m = match_number(string, idx) if m is not None: integer, frac, exp = m.groups() if frac or exp: res = parse_float(integer + (frac or '') + (exp or '')) else: res = parse_int(integer) return res, m.end() elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': return parse_constant('NaN'), idx + 3 elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': return parse_constant('Infinity'), idx + 8 elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': return parse_constant('-Infinity'), idx + 9 else: raise StopIteration return _scan_once make_scanner = c_make_scanner or py_make_scanner
[ [ 8, 0, 0.0231, 0.0308, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0462, 0.0154, 0, 0.66, 0.1667, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 7, 0, 0.0846, 0.0615, 0, 0.66...
[ "\"\"\"JSON token scanner\n\"\"\"", "import re", "try:\n from simplejson._speedups import make_scanner as c_make_scanner\nexcept ImportError:\n c_make_scanner = None", " from simplejson._speedups import make_scanner as c_make_scanner", " c_make_scanner = None", "__all__ = ['make_scanner']", ...
"""Implementation of JSONEncoder """ import re try: from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii except ImportError: c_encode_basestring_ascii = None try: from simplejson._speedups import make_encoder as c_make_encoder except ImportError: c_make_encoder = None ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') ESCAPE_DCT = { '\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', } for i in range(0x20): ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) # Assume this produces an infinity on all machines (probably not guaranteed) INFINITY = float('1e66666') FLOAT_REPR = repr def encode_basestring(s): """Return a JSON representation of a Python string """ def replace(match): return ESCAPE_DCT[match.group(0)] return '"' + ESCAPE.sub(replace, s) + '"' def py_encode_basestring_ascii(s): """Return an ASCII-only JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): s = match.group(0) try: return ESCAPE_DCT[s] except KeyError: n = ord(s) if n < 0x10000: return '\\u%04x' % (n,) else: # surrogate pair n -= 0x10000 s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) return '\\u%04x\\u%04x' % (s1, s2) return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii class JSONEncoder(object): """Extensible JSON <http://json.org> encoder for Python data structures. 
Supports the following objects and types by default: +-------------------+---------------+ | Python | JSON | +===================+===============+ | dict | object | +-------------------+---------------+ | list, tuple | array | +-------------------+---------------+ | str, unicode | string | +-------------------+---------------+ | int, long, float | number | +-------------------+---------------+ | True | true | +-------------------+---------------+ | False | false | +-------------------+---------------+ | None | null | +-------------------+---------------+ To extend this to recognize other objects, subclass and implement a ``.default()`` method with another method that returns a serializable object for ``o`` if possible, otherwise it should call the superclass implementation (to raise ``TypeError``). """ item_separator = ', ' key_separator = ': ' def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, sort_keys=False, indent=None, separators=None, encoding='utf-8', default=None): """Constructor for JSONEncoder, with sensible defaults. If skipkeys is False, then it is a TypeError to attempt encoding of keys that are not str, int, long, float or None. If skipkeys is True, such items are simply skipped. If ensure_ascii is True, the output is guaranteed to be str objects with all incoming unicode characters escaped. If ensure_ascii is false, the output will be unicode object. If check_circular is True, then lists, dicts, and custom encoded objects will be checked for circular references during encoding to prevent an infinite recursion (which would cause an OverflowError). Otherwise, no such check takes place. If allow_nan is True, then NaN, Infinity, and -Infinity will be encoded as such. This behavior is not JSON specification compliant, but is consistent with most JavaScript based encoders and decoders. Otherwise, it will be a ValueError to encode such floats. 
If sort_keys is True, then the output of dictionaries will be sorted by key; this is useful for regression tests to ensure that JSON serializations can be compared on a day-to-day basis. If indent is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. None is the most compact representation. If specified, separators should be a (item_separator, key_separator) tuple. The default is (', ', ': '). To get the most compact JSON representation you should specify (',', ':') to eliminate whitespace. If specified, default is a function that gets called for objects that can't otherwise be serialized. It should return a JSON encodable version of the object or raise a ``TypeError``. If encoding is not None, then all input strings will be transformed into unicode using that encoding prior to JSON-encoding. The default is UTF-8. """ self.skipkeys = skipkeys self.ensure_ascii = ensure_ascii self.check_circular = check_circular self.allow_nan = allow_nan self.sort_keys = sort_keys self.indent = indent if separators is not None: self.item_separator, self.key_separator = separators if default is not None: self.default = default self.encoding = encoding def default(self, o): """Implement this method in a subclass such that it returns a serializable object for ``o``, or calls the base implementation (to raise a ``TypeError``). For example, to support arbitrary iterators, you could implement default like this:: def default(self, o): try: iterable = iter(o) except TypeError: pass else: return list(iterable) return JSONEncoder.default(self, o) """ raise TypeError("%r is not JSON serializable" % (o,)) def encode(self, o): """Return a JSON string representation of a Python data structure. >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) '{"foo": ["bar", "baz"]}' """ # This is for extremely simple cases and benchmarks. 
if isinstance(o, basestring): if isinstance(o, str): _encoding = self.encoding if (_encoding is not None and not (_encoding == 'utf-8')): o = o.decode(_encoding) if self.ensure_ascii: return encode_basestring_ascii(o) else: return encode_basestring(o) # This doesn't pass the iterator directly to ''.join() because the # exceptions aren't as detailed. The list call should be roughly # equivalent to the PySequence_Fast that ''.join() would do. chunks = self.iterencode(o, _one_shot=True) if not isinstance(chunks, (list, tuple)): chunks = list(chunks) return ''.join(chunks) def iterencode(self, o, _one_shot=False): """Encode the given object and yield each string representation as available. For example:: for chunk in JSONEncoder().iterencode(bigobject): mysocket.write(chunk) """ if self.check_circular: markers = {} else: markers = None if self.ensure_ascii: _encoder = encode_basestring_ascii else: _encoder = encode_basestring if self.encoding != 'utf-8': def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): if isinstance(o, str): o = o.decode(_encoding) return _orig_encoder(o) def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY): # Check for specials. Note that this type of test is processor- and/or # platform-specific, so do tests which don't depend on the internals. 
if o != o: text = 'NaN' elif o == _inf: text = 'Infinity' elif o == _neginf: text = '-Infinity' else: return _repr(o) if not allow_nan: raise ValueError("Out of range float values are not JSON compliant: %r" % (o,)) return text if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys: _iterencode = c_make_encoder( markers, self.default, _encoder, self.indent, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, self.allow_nan) else: _iterencode = _make_iterencode( markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot) return _iterencode(o, 0) def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, ## HACK: hand-optimized bytecode; turn globals into locals False=False, True=True, ValueError=ValueError, basestring=basestring, dict=dict, float=float, id=id, int=int, isinstance=isinstance, list=list, long=long, str=str, tuple=tuple, ): def _iterencode_list(lst, _current_indent_level): if not lst: yield '[]' return if markers is not None: markerid = id(lst) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = lst buf = '[' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (' ' * (_indent * _current_indent_level)) separator = _item_separator + newline_indent buf += newline_indent else: newline_indent = None separator = _item_separator first = True for value in lst: if first: first = False else: buf = separator if isinstance(value, basestring): yield buf + _encoder(value) elif value is None: yield buf + 'null' elif value is True: yield buf + 'true' elif value is False: yield buf + 'false' elif isinstance(value, (int, long)): yield buf + str(value) elif isinstance(value, float): yield buf + _floatstr(value) else: yield buf if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, 
_current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (' ' * (_indent * _current_indent_level)) yield ']' if markers is not None: del markers[markerid] def _iterencode_dict(dct, _current_indent_level): if not dct: yield '{}' return if markers is not None: markerid = id(dct) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = dct yield '{' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (' ' * (_indent * _current_indent_level)) item_separator = _item_separator + newline_indent yield newline_indent else: newline_indent = None item_separator = _item_separator first = True if _sort_keys: items = dct.items() items.sort(key=lambda kv: kv[0]) else: items = dct.iteritems() for key, value in items: if isinstance(key, basestring): pass # JavaScript is weakly typed for these, so it makes sense to # also allow them. Many encoders seem to do something like this. 
elif isinstance(key, float): key = _floatstr(key) elif isinstance(key, (int, long)): key = str(key) elif key is True: key = 'true' elif key is False: key = 'false' elif key is None: key = 'null' elif _skipkeys: continue else: raise TypeError("key %r is not a string" % (key,)) if first: first = False else: yield item_separator yield _encoder(key) yield _key_separator if isinstance(value, basestring): yield _encoder(value) elif value is None: yield 'null' elif value is True: yield 'true' elif value is False: yield 'false' elif isinstance(value, (int, long)): yield str(value) elif isinstance(value, float): yield _floatstr(value) else: if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, _current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (' ' * (_indent * _current_indent_level)) yield '}' if markers is not None: del markers[markerid] def _iterencode(o, _current_indent_level): if isinstance(o, basestring): yield _encoder(o) elif o is None: yield 'null' elif o is True: yield 'true' elif o is False: yield 'false' elif isinstance(o, (int, long)): yield str(o) elif isinstance(o, float): yield _floatstr(o) elif isinstance(o, (list, tuple)): for chunk in _iterencode_list(o, _current_indent_level): yield chunk elif isinstance(o, dict): for chunk in _iterencode_dict(o, _current_indent_level): yield chunk else: if markers is not None: markerid = id(o) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = o o = _default(o) for chunk in _iterencode(o, _current_indent_level): yield chunk if markers is not None: del markers[markerid] return _iterencode
[ [ 8, 0, 0.0035, 0.0046, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0069, 0.0023, 0, 0.66, 0.0667, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 7, 0, 0.015, 0.0092, 0, 0.66,...
[ "\"\"\"Implementation of JSONEncoder\n\"\"\"", "import re", "try:\n from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii\nexcept ImportError:\n c_encode_basestring_ascii = None", " from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii",...
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data interchange format. :mod:`simplejson` exposes an API familiar to users of the standard library :mod:`marshal` and :mod:`pickle` modules. It is the externally maintained version of the :mod:`json` library contained in Python 2.6, but maintains compatibility with Python 2.4 and Python 2.5 and (currently) has significant performance advantages, even without using the optional C extension for speedups. Encoding basic Python object hierarchies:: >>> import simplejson as json >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) '["foo", {"bar": ["baz", null, 1.0, 2]}]' >>> print json.dumps("\"foo\bar") "\"foo\bar" >>> print json.dumps(u'\u1234') "\u1234" >>> print json.dumps('\\') "\\" >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) {"a": 0, "b": 0, "c": 0} >>> from StringIO import StringIO >>> io = StringIO() >>> json.dump(['streaming API'], io) >>> io.getvalue() '["streaming API"]' Compact encoding:: >>> import simplejson as json >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) '[1,2,3,{"4":5,"6":7}]' Pretty printing:: >>> import simplejson as json >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4) >>> print '\n'.join([l.rstrip() for l in s.splitlines()]) { "4": 5, "6": 7 } Decoding JSON:: >>> import simplejson as json >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj True >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar' True >>> from StringIO import StringIO >>> io = StringIO('["streaming API"]') >>> json.load(io)[0] == 'streaming API' True Specializing JSON object decoding:: >>> import simplejson as json >>> def as_complex(dct): ... if '__complex__' in dct: ... return complex(dct['real'], dct['imag']) ... return dct ... >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', ... 
object_hook=as_complex) (1+2j) >>> import decimal >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1') True Specializing JSON object encoding:: >>> import simplejson as json >>> def encode_complex(obj): ... if isinstance(obj, complex): ... return [obj.real, obj.imag] ... raise TypeError("%r is not JSON serializable" % (o,)) ... >>> json.dumps(2 + 1j, default=encode_complex) '[2.0, 1.0]' >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) '[2.0, 1.0]' >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) '[2.0, 1.0]' Using simplejson.tool from the shell to validate and pretty-print:: $ echo '{"json":"obj"}' | python -msimplejson.tool { "json": "obj" } $ echo '{ 1.2:3.4}' | python -msimplejson.tool Expecting property name: line 1 column 2 (char 2) """ __version__ = '2.0.7' __all__ = [ 'dump', 'dumps', 'load', 'loads', 'JSONDecoder', 'JSONEncoder', ] from decoder import JSONDecoder from encoder import JSONEncoder _default_encoder = JSONEncoder( skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, indent=None, separators=None, encoding='utf-8', default=None, ) def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp`` may be ``unicode`` instances, subject to normal Python ``str`` to ``unicode`` coercion rules. Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is likely to cause an error. 
If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. 
""" # cached encoder if (skipkeys is False and ensure_ascii is True and check_circular is True and allow_nan is True and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): iterable = _default_encoder.iterencode(obj) else: if cls is None: cls = JSONEncoder iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).iterencode(obj) # could accelerate with writelines in some versions of Python, at # a debuggability cost for chunk in iterable: fp.write(chunk) def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """Serialize ``obj`` to a JSON formatted ``str``. If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is ``False``, then the return value will be a ``unicode`` instance subject to normal Python ``str`` to ``unicode`` coercion rules instead of being escaped to an ASCII ``str``. If ``check_circular`` is ``False``, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a non-negative integer, then JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0 will only insert newlines. ``None`` is the most compact representation. 
If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. """ # cached encoder if (skipkeys is False and ensure_ascii is True and check_circular is True and allow_nan is True and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): return _default_encoder.encode(obj) if cls is None: cls = JSONEncoder return cls( skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).encode(obj) _default_decoder = JSONDecoder(encoding=None, object_hook=None) def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing a JSON document) to a Python object. If the contents of ``fp`` is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed, and should be wrapped with ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode`` object and passed to ``loads()`` ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). 
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. """ return loads(fp.read(), encoding=encoding, cls=cls, object_hook=object_hook, parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant, **kw) def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, **kw): """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON document) to a Python object. If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed and should be decoded to ``unicode`` first. ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. JSON-RPC class hinting). ``parse_float``, if specified, will be called with the string of every JSON float to be decoded. By default this is equivalent to float(num_str). This can be used to use another datatype or parser for JSON floats (e.g. decimal.Decimal). ``parse_int``, if specified, will be called with the string of every JSON int to be decoded. By default this is equivalent to int(num_str). This can be used to use another datatype or parser for JSON integers (e.g. float). ``parse_constant``, if specified, will be called with one of the following strings: -Infinity, Infinity, NaN, null, true, false. This can be used to raise an exception if invalid JSON numbers are encountered. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. 
""" if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and not kw): return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: kw['object_hook'] = object_hook if parse_float is not None: kw['parse_float'] = parse_float if parse_int is not None: kw['parse_int'] = parse_int if parse_constant is not None: kw['parse_constant'] = parse_constant return cls(encoding=encoding, **kw).decode(s)
[ [ 8, 0, 0.1582, 0.3133, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.3165, 0.0032, 0, 0.66, 0.1, 162, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.3244, 0.0127, 0, 0.66, ...
[ "r\"\"\"JSON (JavaScript Object Notation) <http://json.org> is a subset of\nJavaScript syntax (ECMA-262 3rd edition) used as a lightweight data\ninterchange format.\n\n:mod:`simplejson` exposes an API familiar to users of the standard library\n:mod:`marshal` and :mod:`pickle` modules. It is the externally maintaine...
#!/usr/bin/python2.4 # # Copyright 2007 The Python-Twitter Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''The setup and build script for the python-twitter library.''' __author__ = 'python-twitter@googlegroups.com' __version__ = '0.8.5' # The base package metadata to be used by both distutils and setuptools METADATA = dict( name = "python-twitter", version = __version__, py_modules = ['twitter'], author='The Python-Twitter Developers', author_email='python-twitter@googlegroups.com', description='A python wrapper around the Twitter API', license='Apache License 2.0', url='https://github.com/bear/python-twitter', keywords='twitter api', ) # Extra package metadata to be used only if setuptools is installed SETUPTOOLS_METADATA = dict( install_requires = ['setuptools', 'simplejson', 'oauth2'], include_package_data = True, classifiers = [ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Communications :: Chat', 'Topic :: Internet', ], test_suite = 'twitter_test.suite', ) def Read(file): return open(file).read() def BuildLongDescription(): return '\n'.join([Read('README.md'), Read('CHANGES')]) def Main(): # Build the long_description from the README and CHANGES METADATA['long_description'] = BuildLongDescription() # Use setuptools if available, otherwise fallback and use distutils try: import setuptools 
METADATA.update(SETUPTOOLS_METADATA) setuptools.setup(**METADATA) except ImportError: import distutils.core distutils.core.setup(**METADATA) if __name__ == '__main__': Main()
[ [ 8, 0, 0.2329, 0.0137, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2603, 0.0137, 0, 0.66, 0.125, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.274, 0.0137, 0, 0.66, ...
[ "'''The setup and build script for the python-twitter library.'''", "__author__ = 'python-twitter@googlegroups.com'", "__version__ = '0.8.5'", "METADATA = dict(\n name = \"python-twitter\",\n version = __version__,\n py_modules = ['twitter'],\n author='The Python-Twitter Developers',\n author_email='pyth...
'''
Module which brings history information about files from Mercurial.

@author: Rodrigo Damazio
'''

import re
import subprocess

# Matches one line of "hg annotate -c" output: a 12-hex-digit short changeset
# hash, a colon, then the annotated source line.
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')


def _GetOutputLines(args):
  '''
  Runs an external process and returns its output as a list of lines.

  @param args: the arguments to run
  '''
  process = subprocess.Popen(args,
                             stdout=subprocess.PIPE,
                             universal_newlines=True,
                             shell=False)
  output = process.communicate()[0]
  return output.splitlines()


def FillMercurialRevisions(filename, parsed_file):
  '''
  Fills the revs attribute of all strings in the given parsed file with a
  list of revisions that touched the lines corresponding to that string.

  @param filename: the name of the file to get history for
  @param parsed_file: the parsed file to modify
  @raise ValueError: if a line of hg output cannot be parsed
  '''
  # Take output of hg annotate to get revision of each line
  output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])

  # Create a map of line -> revision (key is list index, line 0 doesn't exist)
  line_revs = ['dummy']
  for line in output_lines:
    rev_match = REVISION_REGEX.match(line)
    if not rev_match:
      # BUG FIX: the original raised a plain string, which is not a valid
      # exception (TypeError at raise time); raise a real exception instead.
      raise ValueError('Unexpected line of output from hg: %s' % line)
    rev_hash = rev_match.group('hash')
    line_revs.append(rev_hash)

  # Renamed the loop variable from 'str' (shadowed the builtin) to 'entry'.
  for entry in parsed_file.itervalues():
    # Get the lines that correspond to each string
    start_line = entry['startLine']
    end_line = entry['endLine']

    # Get the revisions that touched those lines
    revs = [line_revs[line_number]
            for line_number in range(start_line, end_line + 1)]

    # Merge with any revisions that were already there
    # (for explicit revision specification)
    if 'revs' in entry:
      revs += entry['revs']

    # Assign the revisions to the string
    entry['revs'] = frozenset(revs)


def DoesRevisionSuperceed(filename, rev1, rev2):
  '''
  Tells whether a revision superceeds another.
  This essentially means that the older revision is an ancestor of the newer
  one. This also returns True if the two revisions are the same.

  @param rev1: the revision that may be superceeding the other
  @param rev2: the revision that may be superceeded
  @return: True if rev1 superceeds rev2 or they're the same
  '''
  if rev1 == rev2:
    return True

  # TODO: Add filename
  args = ['hg', 'log',
          '-r', 'ancestors(%s)' % rev1,
          '--template', '{node|short}\n',
          filename]
  output_lines = _GetOutputLines(args)
  return rev2 in output_lines


def NewestRevision(filename, rev1, rev2):
  '''
  Returns which of two revisions is closest to the head of the repository.
  If none of them is the ancestor of the other, then we return either one.

  @param rev1: the first revision
  @param rev2: the second revision
  '''
  if DoesRevisionSuperceed(filename, rev1, rev2):
    return rev1
  return rev2
[ [ 8, 0, 0.0319, 0.0532, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0745, 0.0106, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0851, 0.0106, 0, 0.66...
[ "'''\nModule which brings history information about files from Mercurial.\n\n@author: Rodrigo Damazio\n'''", "import re", "import subprocess", "REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')", "def _GetOutputLines(args):\n '''\n Runs an external process and returns its output as a list of lines...
#!/usr/bin/python ''' Entry point for My Tracks i18n tool. @author: Rodrigo Damazio ''' import mytracks.files import mytracks.translate import mytracks.validate import sys def Usage(): print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0] print 'Commands are:' print ' cleanup' print ' translate' print ' validate' sys.exit(1) def Translate(languages): ''' Asks the user to interactively translate any missing or oudated strings from the files for the given languages. @param languages: the languages to translate ''' validator = mytracks.validate.Validator(languages) validator.Validate() missing = validator.missing_in_lang() outdated = validator.outdated_in_lang() for lang in languages: untranslated = missing[lang] + outdated[lang] if len(untranslated) == 0: continue translator = mytracks.translate.Translator(lang) translator.Translate(untranslated) def Validate(languages): ''' Computes and displays errors in the string files for the given languages. @param languages: the languages to compute for ''' validator = mytracks.validate.Validator(languages) validator.Validate() error_count = 0 if (validator.valid()): print 'All files OK' else: for lang, missing in validator.missing_in_master().iteritems(): print 'Missing in master, present in %s: %s:' % (lang, str(missing)) error_count = error_count + len(missing) for lang, missing in validator.missing_in_lang().iteritems(): print 'Missing in %s, present in master: %s:' % (lang, str(missing)) error_count = error_count + len(missing) for lang, outdated in validator.outdated_in_lang().iteritems(): print 'Outdated in %s: %s:' % (lang, str(outdated)) error_count = error_count + len(outdated) return error_count if __name__ == '__main__': argv = sys.argv argc = len(argv) if argc < 2: Usage() languages = mytracks.files.GetAllLanguageFiles() if argc == 3: langs = set(argv[2:]) if not langs.issubset(languages): raise 'Language(s) not found' # Filter just to the languages specified languages = dict((lang, lang_file) for lang, 
lang_file in languages.iteritems() if lang in langs or lang == 'en' ) cmd = argv[1] if cmd == 'translate': Translate(languages) elif cmd == 'validate': error_count = Validate(languages) else: Usage() error_count = 0 print '%d errors found.' % error_count
[ [ 8, 0, 0.0417, 0.0521, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0833, 0.0104, 0, 0.66, 0.125, 640, 0, 1, 0, 0, 640, 0, 0 ], [ 1, 0, 0.0938, 0.0104, 0, 0.66,...
[ "'''\nEntry point for My Tracks i18n tool.\n\n@author: Rodrigo Damazio\n'''", "import mytracks.files", "import mytracks.translate", "import mytracks.validate", "import sys", "def Usage():\n print('Usage: %s <command> [<language> ...]\\n' % sys.argv[0])\n print('Commands are:')\n print(' cleanup')\n p...
''' Module which prompts the user for translations and saves them. TODO: implement @author: Rodrigo Damazio ''' class Translator(object): ''' classdocs ''' def __init__(self, language): ''' Constructor ''' self._language = language def Translate(self, string_names): print string_names
[ [ 8, 0, 0.1905, 0.3333, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.7143, 0.619, 0, 0.66, 1, 229, 0, 2, 0, 0, 186, 0, 1 ], [ 8, 1, 0.5238, 0.1429, 1, 0, 0, ...
[ "'''\nModule which prompts the user for translations and saves them.\n\nTODO: implement\n\n@author: Rodrigo Damazio\n'''", "class Translator(object):\n '''\n classdocs\n '''\n\n def __init__(self, language):\n '''\n Constructor", " '''\n classdocs\n '''", " def __init__(self, language):\n '''...
'''
Module which compares languague files to the master file and detects
issues.

@author: Rodrigo Damazio
'''

import os
from mytracks.parser import StringsParser
import mytracks.history


class Validator(object):
  # Validates each language's strings.xml against the English master file:
  # finds keys missing on either side, and keys whose master copy was touched
  # by a newer Mercurial revision than the translation.

  def __init__(self, languages):
    '''
    Builds a strings file validator.

    Params:
    @param languages: a dictionary mapping each language to its corresponding
        directory; the 'en' entry is treated as the master file
    '''
    self._langs = {}
    self._master = None
    self._language_paths = languages

    parser = StringsParser()
    for lang, lang_dir in languages.iteritems():
      filename = os.path.join(lang_dir, 'strings.xml')
      parsed_file = parser.Parse(filename)
      # Annotate every parsed string with the hg revisions that touched it.
      mytracks.history.FillMercurialRevisions(filename, parsed_file)

      if lang == 'en':
        self._master = parsed_file
      else:
        self._langs[lang] = parsed_file

    self._Reset()

  def Validate(self):
    '''
    Computes whether all the data in the files for the given languages is
    valid.  Results are read back through valid() and the accessors below.
    '''
    self._Reset()
    self._ValidateMissingKeys()
    self._ValidateOutdatedKeys()

  def valid(self):
    # True iff the last Validate() run found no issues of any kind.
    return (len(self._missing_in_master) == 0 and
            len(self._missing_in_lang) == 0 and
            len(self._outdated_in_lang) == 0)

  def missing_in_master(self):
    # Map: language -> keys present in that language but absent from master.
    return self._missing_in_master

  def missing_in_lang(self):
    # Map: language -> keys present in master but absent from that language.
    return self._missing_in_lang

  def outdated_in_lang(self):
    # Map: language -> keys whose master copy is newer than the translation.
    return self._outdated_in_lang

  def _Reset(self):
    # These are maps from language to string name list
    self._missing_in_master = {}
    self._missing_in_lang = {}
    self._outdated_in_lang = {}

  def _ValidateMissingKeys(self):
    '''
    Computes whether there are missing keys on either side.
    '''
    master_keys = frozenset(self._master.iterkeys())
    for lang, file in self._langs.iteritems():
      keys = frozenset(file.iterkeys())
      missing_in_master = keys - master_keys
      missing_in_lang = master_keys - keys
      # Only languages that actually have problems are recorded, so the
      # result maps double as issue lists.
      if len(missing_in_master) > 0:
        self._missing_in_master[lang] = missing_in_master
      if len(missing_in_lang) > 0:
        self._missing_in_lang[lang] = missing_in_lang

  def _ValidateOutdatedKeys(self):
    '''
    Computes whether any of the language keys are outdated with relation to
    the master keys.
    '''
    for lang, file in self._langs.iteritems():
      outdated = []
      for key, str in file.iteritems():
        # Get all revisions that touched master and language files for this
        # string.
        # NOTE(review): assumes every language key also exists in the master
        # file (KeyError otherwise) - presumably missing keys are meant to be
        # caught by _ValidateMissingKeys first; confirm.
        master_str = self._master[key]
        master_revs = master_str['revs']
        lang_revs = str['revs']
        if not master_revs or not lang_revs:
          print 'WARNING: No revision for %s in %s' % (key, lang)
          continue

        master_file = os.path.join(self._language_paths['en'], 'strings.xml')
        lang_file = os.path.join(self._language_paths[lang], 'strings.xml')

        # Assume that the repository has a single head (TODO: check that),
        # and as such there is always one revision which superceeds all others.
        master_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
            master_revs)
        lang_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
            lang_revs)

        # If the master version is newer than the lang version
        if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev,
                                                  lang_rev):
          outdated.append(key)

      if len(outdated) > 0:
        self._outdated_in_lang[lang] = outdated
[ [ 8, 0, 0.0304, 0.0522, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66, 0.25, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0783, 0.0087, 0, 0.66, ...
[ "'''\nModule which compares languague files to the master file and detects\nissues.\n\n@author: Rodrigo Damazio\n'''", "import os", "from mytracks.parser import StringsParser", "import mytracks.history", "class Validator(object):\n\n def __init__(self, languages):\n '''\n Builds a strings file valida...
''' Module for dealing with resource files (but not their contents). @author: Rodrigo Damazio ''' import os.path from glob import glob import re MYTRACKS_RES_DIR = 'MyTracks/res' ANDROID_MASTER_VALUES = 'values' ANDROID_VALUES_MASK = 'values-*' def GetMyTracksDir(): ''' Returns the directory in which the MyTracks directory is located. ''' path = os.getcwd() while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)): if path == '/': raise 'Not in My Tracks project' # Go up one level path = os.path.split(path)[0] return path def GetAllLanguageFiles(): ''' Returns a mapping from all found languages to their respective directories. ''' mytracks_path = GetMyTracksDir() res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK) language_dirs = glob(res_dir) master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES) if len(language_dirs) == 0: raise 'No languages found!' if not os.path.isdir(master_dir): raise 'Couldn\'t find master file' language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', dir)[0],dir) for dir in language_dirs] language_tuples.append(('en', master_dir)) return dict(language_tuples)
[ [ 8, 0, 0.0667, 0.1111, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1333, 0.0222, 0, 0.66, 0.125, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.1556, 0.0222, 0, 0.66, ...
[ "'''\nModule for dealing with resource files (but not their contents).\n\n@author: Rodrigo Damazio\n'''", "import os.path", "from glob import glob", "import re", "MYTRACKS_RES_DIR = 'MyTracks/res'", "ANDROID_MASTER_VALUES = 'values'", "ANDROID_VALUES_MASK = 'values-*'", "def GetMyTracksDir():\n '''\n...
'''
Module which parses a string XML file.

@author: Rodrigo Damazio
'''

from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET


class StringsParser(object):
  '''
  Parser for string XML files.

  This object is not thread-safe and should be used for parsing a single file at
  a time, only.
  '''

  def Parse(self, file):
    '''
    Parses the given file and returns a dictionary mapping keys to an object
    with attributes for that key, such as the value, start/end line and
    explicit revisions.

    In addition to the standard XML format of the strings file, this parser
    supports an annotation inside comments, in one of these formats:
    <!-- KEEP_PARENT name="bla" -->
    <!-- KEEP_PARENT name="bla" rev="123456789012" -->
    Such an annotation indicates that we're explicitly inheriting form the
    master file (and the optional revision says that this decision is
    compatible with the master file up to that revision).

    @param file: the name of the file to parse
    @return: dict mapping string name to a dict with 'value', 'startLine',
        'endLine' and optionally 'revs' / 'keepParent' entries
    '''
    self._Reset()

    # Unfortunately expat is the only parser that will give us line numbers
    self._xml_parser = ParserCreate()
    self._xml_parser.StartElementHandler = self._StartElementHandler
    self._xml_parser.EndElementHandler = self._EndElementHandler
    self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
    self._xml_parser.CommentHandler = self._CommentHandler

    file_obj = open(file)
    self._xml_parser.ParseFile(file_obj)
    file_obj.close()

    return self._all_strings

  def _Reset(self):
    # Parsing state for the <string> element currently being read, if any.
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None
    # Accumulated results: string name -> attribute dict (see Parse()).
    self._all_strings = {}

  def _StartElementHandler(self, name, attrs):
    # Only named <string> elements are of interest; everything else is
    # ignored (including the surrounding <resources> element).
    if name != 'string': return
    if 'name' not in attrs: return

    # <string> elements are never nested, so no element may be in progress.
    assert not self._currentString
    assert not self._currentStringName

    self._currentString = {
        'startLine' : self._xml_parser.CurrentLineNumber,
    }

    # An explicit rev="..." attribute pins this string to a known revision.
    if 'rev' in attrs:
      self._currentString['revs'] = [attrs['rev']]

    self._currentStringName = attrs['name']
    self._currentStringValue = ''

  def _EndElementHandler(self, name):
    if name != 'string': return

    assert self._currentString
    assert self._currentStringName

    # Commit the accumulated element to the results and clear the state.
    self._currentString['value'] = self._currentStringValue
    self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
    self._all_strings[self._currentStringName] = self._currentString

    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None

  def _CharacterDataHandler(self, data):
    # Character data may arrive in multiple chunks; append each one, but
    # only while inside a <string> element.
    if not self._currentString: return

    self._currentStringValue += data

  # Recognizes the KEEP_PARENT comment annotation described in Parse().
  _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
      r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
      r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
      re.MULTILINE | re.DOTALL)

  def _CommentHandler(self, data):
    keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
    if not keep_parent_match: return

    # A KEEP_PARENT comment produces a synthetic entry with no value.
    name = keep_parent_match.group('name')
    self._all_strings[name] = {
        'keepParent' : True,
        'startLine' : self._xml_parser.CurrentLineNumber,
        'endLine' : self._xml_parser.CurrentLineNumber
    }

    rev = keep_parent_match.group('rev')
    if rev:
      self._all_strings[name]['revs'] = [rev]
[ [ 8, 0, 0.0261, 0.0435, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0609, 0.0087, 0, 0.66, 0.3333, 573, 0, 1, 0, 0, 573, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66...
[ "'''\nModule which parses a string XML file.\n\n@author: Rodrigo Damazio\n'''", "from xml.parsers.expat import ParserCreate", "import re", "class StringsParser(object):\n '''\n Parser for string XML files.\n\n This object is not thread-safe and should be used for parsing a single file at\n a time, only.\n...
import urllib import urllib2 import argparse import sys from log import Logger from datetime import datetime, timedelta def verify_order(postdata, sandbox): data = 'cmd=_notify-validate&' + postdata if sandbox: scr = 'https://www.sandbox.paypal.com/cgi-bin/websc' else: scr = 'https://www.paypal.com/cgi-bin/websc' res = urllib2.urlopen(scr, data).read() if res == 'VERIFIED': return True return False def convert_order(item): value = {} value['name'] = item['address_name'] value['mail'] = item['payer_email'] value['nickname'] = item['address_name'] gross = float(item['mc_gross']) fee = float(item['mc_fee']) value['amount'] = gross value['actual_amount'] = gross - fee value['unit'] = 'USD' value['comment'] = '' try: value['time'] = datetime.strptime( item['payment_date'], '%H:%M:%S %b %d, %Y PDT') + timedelta(hours = 15) except: value['time'] = datetime.strptime( item['payment_date'], '%H:%M:%S %b %d, %Y PST') + timedelta(hours = 16) value['method'] = 'paypal' value['id'] = item['txn_id'] return value def get_order(postdata): fields = postdata.split('&') item = {} for field in fields: name, value = field.split('=') value = urllib.unquote_plus(value) item[name] = value for field in item: item[field] = item[field].decode(item['charset']) return item def main(): parser = argparse.ArgumentParser() parser.add_argument('-l', '--log', dest='logfile', help='Logfile', required=True) parser.add_argument('-p', '--paypal', dest='paypal', help='Paypal input', required=True) args = parser.parse_args() item = get_order(args.paypal) if not verify_order(args.paypal, 'test_ipn' in item): print 'Error in verification' print args.paypal sys.exit(1) if item['payment_status'] != 'Completed': print 'Payment from ', item['address_name'], ' not completed ', item['txn_id'] print args.paypal sys.exit(1) logitem = convert_order(item) logfile = args.logfile if 'test_ipn' in item: logfile = 'test' logger = Logger({'logfile': logfile}) logger.log(logitem) logger.close() print (u'Received payment 
from %s, txn_id=%s, amount=$%.2f, date=%s' % ( logitem['name'], logitem['id'], logitem['amount'], item['payment_date'] )).encode('utf-8') if __name__ == '__main__': main()
[ [ 1, 0, 0.012, 0.012, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0241, 0.012, 0, 0.66, 0.1, 345, 0, 1, 0, 0, 345, 0, 0 ], [ 1, 0, 0.0361, 0.012, 0, 0.66, ...
[ "import urllib", "import urllib2", "import argparse", "import sys", "from log import Logger", "from datetime import datetime, timedelta", "def verify_order(postdata, sandbox):\n data = 'cmd=_notify-validate&' + postdata\n if sandbox:\n scr = 'https://www.sandbox.paypal.com/cgi-bin/websc'\n else:\n...
from hgapi import * import shutil import os import threading import optparse import traceback import log from datetime import datetime, timedelta class HGDonationLog: def __init__(self, config, logger): self._path = config['path'] self._repo = Repo(self._path) self._target = config['logfile'] self._templatefile = config['template'] self._update_interval = float(config.get('update_interval', 300)) self._logger = logger self.lastupdate = datetime.fromtimestamp(0) def start_listener(self): self.thread = threading.Thread(target = self.listener_thread) self.thread.start() def listener_thread(self): self.update_file() while True: sleep(self._update_interval) lastdataupdate = logger.lastupdate() if lastdataupdate > self.lastupdate: self.update_file() def gen_file(self, path): with open(path, 'w') as f: with open(self._templatefile, 'r') as tempfile: template = tempfile.read().decode('utf-8') usd, usdnofee, cny = self._logger.read_total() chinatime = (datetime.utcnow() + timedelta(hours = 8)) lastupdatestr = chinatime.strftime('%Y-%b-%d %X') + ' GMT+8' s = template.format(cny = cny, usd = usd, usdnofee = usdnofee, lastupdate = lastupdatestr) f.write(s.encode('utf-8')) records = self._logger.read_records() records.reverse() for item in records: t = item['time'].strftime('%b %d, %Y') item['datestr'] = t s = u'|| {nickname} || {amount:.2f} {unit} || {datestr} ||'.format(**item) print >>f, s.encode('utf-8') def update_file(self): try: self._repo.hg_command('pull') self._repo.hg_update(self._repo.hg_heads()[0]) path = os.path.join(self._path, self._target) print 'update donation log on wiki at ', datetime.utcnow() + timedelta(hours=8) self.gen_file(path) print 'File generated' msg = 'Auto update from script' diff = self._repo.hg_command('diff', self._target) if diff == '': print 'No change, skipping update donation wiki' return else: print diff.encode('utf-8') self._repo.hg_commit(msg, files = [self._target]) print 'change committed' self._repo.hg_command('push') print 'repo 
pushed to server' self.lastupdate = datetime.utcnow() + timedelta(hours = 8) except Exception as ex: print 'Update wiki failed: ', str(ex).encode('utf-8') traceback.print_exc() def main(): parser = optparse.OptionParser() parser.add_option('-w', '--wiki', dest='path', help='Your wiki repo') parser.add_option('-o', '--output', dest='logfile', help='Your logging file') parser.add_option('-t', '--template', dest='template', help='Your template file') parser.add_option('-l', '--logfile', dest='log', help='Log file') options, args = parser.parse_args() logger = log.Logger({'logfile': options.log}) print vars(options) hgclient = HGDonationLog(vars(options), logger) hgclient.update_file() if __name__ == '__main__': main()
[ [ 1, 0, 0.011, 0.011, 0, 0.66, 0, 757, 0, 1, 0, 0, 757, 0, 0 ], [ 1, 0, 0.022, 0.011, 0, 0.66, 0.1, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.033, 0.011, 0, 0.66, ...
[ "from hgapi import *", "import shutil", "import os", "import threading", "import optparse", "import traceback", "import log", "from datetime import datetime, timedelta", "class HGDonationLog:\n def __init__(self, config, logger):\n self._path = config['path']\n self._repo = Repo(self._path)\n...
import subprocess import tempfile import shutil import os import codecs import json import zipfile class Packer: def __init__(self, input_path, outputfile): self.input_path = os.path.abspath(input_path) self.outputfile = os.path.abspath(outputfile) self.tmppath = None def pack(self): if self.tmppath == None: self.tmppath = tempfile.mkdtemp() else: self.tmppath = os.path.abspath(self.tmppath) if not os.path.isdir(self.tmppath): os.mkdir(self.tmppath) self.zipf = zipfile.ZipFile(self.outputfile, 'w', zipfile.ZIP_DEFLATED) self.processdir('') self.zipf.close() def processdir(self, path): dst = os.path.join(self.tmppath, path) if not os.path.isdir(dst): os.mkdir(dst) for f in os.listdir(os.path.join(self.input_path, path)): abspath = os.path.join(self.input_path, path, f) if os.path.isdir(abspath): self.processdir(os.path.join(path, f)) else: self.processfile(os.path.join(path, f)) def compact_json(self, src, dst): print 'Compacting json file ', src with open(src) as s: sval = s.read() if sval[:3] == codecs.BOM_UTF8: sval = sval[3:].decode('utf-8') val = json.loads(sval, 'utf-8') with open(dst, 'w') as d: json.dump(val, d, separators=(',', ':')) def processfile(self, path): src = os.path.join(self.input_path, path) dst = os.path.join(self.tmppath, path) if not os.path.isfile(dst) or os.stat(src).st_mtime > os.stat(dst).st_mtime: ext = os.path.splitext(path)[1].lower() op = None if ext == '.js': if path.split(os.sep)[0] == 'settings': op = self.copyfile elif os.path.basename(path) == 'jquery.js': op = self.copyfile else: op = self.compilefile elif ext == '.json': op = self.compact_json elif ext in ['.swp', '.php']: pass else: op = self.copyfile if op != None: op(src, dst) if os.path.isfile(dst): self.zipf.write(dst, path) def copyfile(self, src, dst): shutil.copyfile(src, dst) def compilefile(self, src, dst): args = ['java', '-jar', 'compiler.jar',\ '--js', src, '--js_output_file', dst] args += ['--language_in', 'ECMASCRIPT5'] print 'Compiling ', src retval = 
subprocess.call(args) if retval != 0: os.remove(dst) print 'Failed to generate ', dst a = Packer('..\\chrome', '..\\plugin.zip') a.tmppath = '..\\output' a.pack()
[ [ 1, 0, 0.0116, 0.0116, 0, 0.66, 0, 394, 0, 1, 0, 0, 394, 0, 0 ], [ 1, 0, 0.0233, 0.0116, 0, 0.66, 0.1, 516, 0, 1, 0, 0, 516, 0, 0 ], [ 1, 0, 0.0349, 0.0116, 0, 0.6...
[ "import subprocess", "import tempfile", "import shutil", "import os", "import codecs", "import json", "import zipfile", "class Packer:\n def __init__(self, input_path, outputfile):\n self.input_path = os.path.abspath(input_path)\n self.outputfile = os.path.abspath(outputfile)\n self.tmppath ...
#!/usr/bin/env python # # Copyright 2006, 2007 Google Inc. All Rights Reserved. # Author: danderson@google.com (David Anderson) # # Script for uploading files to a Google Code project. # # This is intended to be both a useful script for people who want to # streamline project uploads and a reference implementation for # uploading files to Google Code projects. # # To upload a file to Google Code, you need to provide a path to the # file on your local machine, a small summary of what the file is, a # project name, and a valid account that is a member or owner of that # project. You can optionally provide a list of labels that apply to # the file. The file will be uploaded under the same name that it has # in your local filesystem (that is, the "basename" or last path # component). Run the script with '--help' to get the exact syntax # and available options. # # Note that the upload script requests that you enter your # googlecode.com password. This is NOT your Gmail account password! # This is the password you use on googlecode.com for committing to # Subversion and uploading files. You can find your password by going # to http://code.google.com/hosting/settings when logged in with your # Gmail account. If you have already committed to your project's # Subversion repository, the script will automatically retrieve your # credentials from there (unless disabled, see the output of '--help' # for details). # # If you are looking at this script as a reference for implementing # your own Google Code file uploader, then you should take a look at # the upload() function, which is the meat of the uploader. You # basically need to build a multipart/form-data POST request with the # right fields and send it to https://PROJECT.googlecode.com/files . # Authenticate the request using HTTP Basic authentication, as is # shown below. 
#
# Licensed under the terms of the Apache Software License 2.0:
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
#  http://groups.google.com/group/google-code-hosting

"""Google Code file uploader script.
"""

__author__ = 'danderson@google.com (David Anderson)'

import httplib
import os.path
import optparse
import getpass
import base64
import sys


def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.

  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
              Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple:
    http_status: 201 if the upload succeeded, something else if an
                 error occured.
    http_reason: The human-readable string associated with http_status
    file_url: If the upload succeeded, the URL of the file on Google
              Code, None otherwise.
  """
  # The login is the user part of user@gmail.com. If the login provided
  # is in the full user@domain form, strip it down.
  if user_name.endswith('@gmail.com'):
    user_name = user_name[:user_name.index('@gmail.com')]

  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', l.strip()) for l in labels])

  content_type, body = encode_upload_request(form_fields, file)

  upload_host = '%s.googlecode.com' % project_name
  upload_uri = '/files'
  # HTTP Basic auth over HTTPS, per the Google Code upload protocol.
  auth_token = base64.b64encode('%s:%s'% (user_name, password))
  headers = {
    'Authorization': 'Basic %s' % auth_token,
    'User-Agent': 'Googlecode.com uploader v0.9.4',
    'Content-Type': content_type,
    }

  server = httplib.HTTPSConnection(upload_host)
  server.request('POST', upload_uri, body, headers)
  resp = server.getresponse()
  server.close()

  # 201 Created means success; only then does the response carry the
  # Location header with the file's final URL.
  if resp.status == 201:
    location = resp.getheader('Location', None)
  else:
    location = None
  return resp.status, resp.reason, location


def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs. file is the path of
  the file to upload. The file will be uploaded to Google Code with
  the same file name.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'

  body = []

  # Add the metadata about the upload first
  for key, value in fields:
    body.extend(
      ['--' + BOUNDARY,
       'Content-Disposition: form-data; name="%s"' % key,
       '',
       value,
       ])

  # Now add the file itself
  file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  file_content = f.read()
  f.close()

  body.extend(
    ['--' + BOUNDARY,
     'Content-Disposition: form-data; name="filename"; filename="%s"'
     % file_name,
     # The upload server determines the mime-type, no need to set it.
     'Content-Type: application/octet-stream',
     '',
     file_content,
     ])

  # Finalize the form body
  body.extend(['--' + BOUNDARY + '--', ''])

  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)


def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    config_dir: Path to Subversion configuration directory, 'none', or None.
    user_name: Your Google account name.
    tries: How many attempts to make.
  """
  # NOTE(review): the 'config_dir' entry in the docstring above is stale;
  # no such parameter exists in this version of the function.

  # Try ~/.netrc first so interactive prompting can be skipped entirely.
  if user_name is None or password is None:
    from netrc import netrc
    authenticators = netrc().authenticators("code.google.com")
    if authenticators:
      if user_name is None:
        user_name = authenticators[0]
      if password is None:
        password = authenticators[2]

  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()

    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Rest for another try.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break

  return status, reason, url


def main():
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of comma-separated labels to attach '
                    'to the file')

  options, args = parser.parse_args()

  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  elif len(args) > 1:
    parser.error('Only one file may be specified.')

  file_path = args[0]

  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None

  status, reason, url = upload_find_auth(file_path, options.project,
                                         options.summary, labels,
                                         options.user, options.password)
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
    return 0
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
    return 1


if __name__ == '__main__':
  sys.exit(main())
[ [ 8, 0, 0.1816, 0.0078, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1914, 0.0039, 0, 0.66, 0.0833, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.1992, 0.0039, 0, 0.66,...
[ "\"\"\"Google Code file uploader script.\n\"\"\"", "__author__ = 'danderson@google.com (David Anderson)'", "import httplib", "import os.path", "import optparse", "import getpass", "import base64", "import sys", "def upload(file, project_name, user_name, password, summary, labels=None):\n \"\"\"Uplo...
# Copyright (c) 2012 eagleonhill(qiuc12@gmail.com). All rights reserved. # Use of this source code is governed by a Mozilla-1.1 license that can be # found in the LICENSE file. import googlecode_upload import tempfile import urllib2 import optparse import os extensionid = 'lgllffgicojgllpmdbemgglaponefajn' def download(): url = ("https://clients2.google.com/service/update2/crx?" "response=redirect&x=id%3D" + extensionid + "%26uc") response = urllib2.urlopen(url) filename = response.geturl().split('/')[-1] version = '.'.join(filename.replace('_', '.').split('.')[1:-1]) name = os.path.join(tempfile.gettempdir(), filename) f = open(name, 'wb') data = response.read() f.write(data) f.close() return name, version def upload(path, version, user, password): summary = 'Extension version ' + version + ' download' labels = ['Type-Executable'] print googlecode_upload.upload( path, 'np-activex', user, password, summary, labels) def main(): parser = optparse.OptionParser() parser.add_option('-u', '--user', dest='user', help='Your Google Code username') parser.add_option('-w', '--password', dest='password', help='Your Google Code password') options, args = parser.parse_args() name, version = download() print 'File downloaded ', name, version upload(name, version, options.user, options.password) os.remove(name) if __name__ == '__main__': main()
[ [ 1, 0, 0.093, 0.0233, 0, 0.66, 0, 481, 0, 1, 0, 0, 481, 0, 0 ], [ 1, 0, 0.1163, 0.0233, 0, 0.66, 0.1111, 516, 0, 1, 0, 0, 516, 0, 0 ], [ 1, 0, 0.1395, 0.0233, 0, 0...
[ "import googlecode_upload", "import tempfile", "import urllib2", "import optparse", "import os", "extensionid = 'lgllffgicojgllpmdbemgglaponefajn'", "def download():\n url = (\"https://clients2.google.com/service/update2/crx?\"\n \"response=redirect&x=id%3D\" + extensionid + \"%26uc\")\n respons...
maxVf = 200 # Generating the header head = """// Copyright qiuc12@gmail.com // This file is generated autmatically by python. DONT MODIFY IT! #pragma once #include <OleAuto.h> class FakeDispatcher; HRESULT DualProcessCommand(int commandId, FakeDispatcher *disp, ...); extern "C" void DualProcessCommandWrap(); class FakeDispatcherBase : public IDispatch { private:""" pattern = """ \tvirtual HRESULT __stdcall fv{0}(char x) {{ \t\tva_list va = &x; \t\tHRESULT ret = ProcessCommand({0}, va); \t\tva_end(va); \t\treturn ret; \t}} """ pattern = """ \tvirtual HRESULT __stdcall fv{0}();""" end = """ protected: \tconst static int kMaxVf = {0}; }}; """ f = open("FakeDispatcherBase.h", "w") f.write(head) for i in range(0, maxVf): f.write(pattern.format(i)) f.write(end.format(maxVf)) f.close() head = """; Copyright qiuc12@gmail.com ; This file is generated automatically by python. DON'T MODIFY IT! """ f = open("FakeDispatcherBase.asm", "w") f.write(head) f.write(".386\n") f.write(".model flat\n") f.write("_DualProcessCommandWrap proto\n") ObjFormat = "?fv{0}@FakeDispatcherBase@@EAGJXZ" for i in range(0, maxVf): f.write("PUBLIC " + ObjFormat.format(i) + "\n") f.write(".code\n") for i in range(0, maxVf): f.write(ObjFormat.format(i) + " proc\n") f.write(" push {0}\n".format(i)) f.write(" jmp _DualProcessCommandWrap\n") f.write(ObjFormat.format(i) + " endp\n") f.write("\nend\n") f.close()
[ [ 14, 0, 0.0172, 0.0172, 0, 0.66, 0, 302, 1, 0, 0, 0, 0, 1, 0 ], [ 14, 0, 0.1466, 0.1724, 0, 0.66, 0.0476, 217, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.319, 0.1379, 0, 0....
[ "maxVf = 200", "head = \"\"\"// Copyright qiuc12@gmail.com\n// This file is generated autmatically by python. DONT MODIFY IT!\n\n#pragma once\n#include <OleAuto.h>\nclass FakeDispatcher;\nHRESULT DualProcessCommand(int commandId, FakeDispatcher *disp, ...);\nextern \"C\" void DualProcessCommandWrap();", "patter...
import time def yesno(question): val = raw_input(question + " ") return val.startswith("y") or val.startswith("Y") use_pysqlite2 = yesno("Use pysqlite 2.0?") use_autocommit = yesno("Use autocommit?") use_executemany= yesno("Use executemany?") if use_pysqlite2: from pysqlite2 import dbapi2 as sqlite else: import sqlite def create_db(): con = sqlite.connect(":memory:") if use_autocommit: if use_pysqlite2: con.isolation_level = None else: con.autocommit = True cur = con.cursor() cur.execute(""" create table test(v text, f float, i integer) """) cur.close() return con def test(): row = ("sdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffasfd", 3.14, 42) l = [] for i in range(1000): l.append(row) con = create_db() cur = con.cursor() if sqlite.version_info > (2, 0): sql = "insert into test(v, f, i) values (?, ?, ?)" else: sql = "insert into test(v, f, i) values (%s, %s, %s)" starttime = time.time() for i in range(50): if use_executemany: cur.executemany(sql, l) else: for r in l: cur.execute(sql, r) endtime = time.time() print "elapsed", endtime - starttime cur.execute("select count(*) from test") print "rows:", cur.fetchone()[0] if __name__ == "__main__": test()
[ [ 1, 0, 0.0164, 0.0164, 0, 0.66, 0, 654, 0, 1, 0, 0, 654, 0, 0 ], [ 2, 0, 0.0656, 0.0492, 0, 0.66, 0.125, 796, 0, 1, 1, 0, 0, 0, 3 ], [ 14, 1, 0.0656, 0.0164, 1, 0....
[ "import time", "def yesno(question):\n val = raw_input(question + \" \")\n return val.startswith(\"y\") or val.startswith(\"Y\")", " val = raw_input(question + \" \")", " return val.startswith(\"y\") or val.startswith(\"Y\")", "use_pysqlite2 = yesno(\"Use pysqlite 2.0?\")", "use_autocommit = y...
import time def yesno(question): val = raw_input(question + " ") return val.startswith("y") or val.startswith("Y") use_pysqlite2 = yesno("Use pysqlite 2.0?") if use_pysqlite2: use_custom_types = yesno("Use custom types?") use_dictcursor = yesno("Use dict cursor?") use_rowcursor = yesno("Use row cursor?") else: use_tuple = yesno("Use rowclass=tuple?") if use_pysqlite2: from pysqlite2 import dbapi2 as sqlite else: import sqlite def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d if use_pysqlite2: def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d class DictCursor(sqlite.Cursor): def __init__(self, *args, **kwargs): sqlite.Cursor.__init__(self, *args, **kwargs) self.row_factory = dict_factory class RowCursor(sqlite.Cursor): def __init__(self, *args, **kwargs): sqlite.Cursor.__init__(self, *args, **kwargs) self.row_factory = sqlite.Row def create_db(): if sqlite.version_info > (2, 0): if use_custom_types: con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES) sqlite.register_converter("text", lambda x: "<%s>" % x) else: con = sqlite.connect(":memory:") if use_dictcursor: cur = con.cursor(factory=DictCursor) elif use_rowcursor: cur = con.cursor(factory=RowCursor) else: cur = con.cursor() else: if use_tuple: con = sqlite.connect(":memory:") con.rowclass = tuple cur = con.cursor() else: con = sqlite.connect(":memory:") cur = con.cursor() cur.execute(""" create table test(v text, f float, i integer) """) return (con, cur) def test(): row = ("sdfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffasfd", 3.14, 42) l = [] for i in range(1000): l.append(row) con, cur = create_db() if sqlite.version_info > (2, 0): sql = "insert into test(v, f, i) values (?, ?, ?)" else: sql = "insert into test(v, f, i) values (%s, %s, %s)" for i in range(50): cur.executemany(sql, l) cur.execute("select count(*) as cnt from 
test") starttime = time.time() for i in range(50): cur.execute("select v, f, i from test") l = cur.fetchall() endtime = time.time() print "elapsed:", endtime - starttime if __name__ == "__main__": test()
[ [ 1, 0, 0.0103, 0.0103, 0, 0.66, 0, 654, 0, 1, 0, 0, 654, 0, 0 ], [ 2, 0, 0.0412, 0.0309, 0, 0.66, 0.1111, 796, 0, 1, 1, 0, 0, 0, 3 ], [ 14, 1, 0.0412, 0.0103, 1, 0...
[ "import time", "def yesno(question):\n val = raw_input(question + \" \")\n return val.startswith(\"y\") or val.startswith(\"Y\")", " val = raw_input(question + \" \")", " return val.startswith(\"y\") or val.startswith(\"Y\")", "use_pysqlite2 = yesno(\"Use pysqlite 2.0?\")", "if use_pysqlite2:\...
#!/usr/bin/env python from pysqlite2.test import test test()
[ [ 1, 0, 0.6667, 0.3333, 0, 0.66, 0, 472, 0, 1, 0, 0, 472, 0, 0 ], [ 8, 0, 1, 0.3333, 0, 0.66, 1, 224, 3, 0, 0, 0, 0, 0, 1 ] ]
[ "from pysqlite2.test import test", "test()" ]
from pysqlite2 import dbapi2 as sqlite import os, threading def getcon(): #con = sqlite.connect("db", isolation_level=None, timeout=5.0) con = sqlite.connect(":memory:") cur = con.cursor() cur.execute("create table test(i, s)") for i in range(10): cur.execute("insert into test(i, s) values (?, 'asfd')", (i,)) con.commit() cur.close() return con def reader(what): con = getcon() while 1: cur = con.cursor() cur.execute("select i, s from test where i % 1000=?", (what,)) res = cur.fetchall() cur.close() con.close() def appender(): con = getcon() counter = 0 while 1: cur = con.cursor() cur.execute("insert into test(i, s) values (?, ?)", (counter, "foosadfasfasfsfafs")) #cur.execute("insert into test(foo) values (?)", (counter,)) counter += 1 if counter % 100 == 0: #print "appender committing", counter con.commit() cur.close() con.close() def updater(): con = getcon() counter = 0 while 1: cur = con.cursor() counter += 1 if counter % 5 == 0: cur.execute("update test set s='foo' where i % 50=0") #print "updater committing", counter con.commit() cur.close() con.close() def deleter(): con = getcon() counter = 0 while 1: cur = con.cursor() counter += 1 if counter % 5 == 0: #print "deleter committing", counter cur.execute("delete from test where i % 20=0") con.commit() cur.close() con.close() threads = [] for i in range(10): continue threads.append(threading.Thread(target=lambda: reader(i))) for i in range(5): threads.append(threading.Thread(target=appender)) #threads.append(threading.Thread(target=updater)) #threads.append(threading.Thread(target=deleter)) for t in threads: t.start()
[ [ 1, 0, 0.0125, 0.0125, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 1, 0, 0.025, 0.0125, 0, 0.66, 0.1, 688, 0, 2, 0, 0, 688, 0, 0 ], [ 2, 0, 0.1062, 0.125, 0, 0.66,...
[ "from pysqlite2 import dbapi2 as sqlite", "import os, threading", "def getcon():\n #con = sqlite.connect(\"db\", isolation_level=None, timeout=5.0)\n con = sqlite.connect(\":memory:\")\n cur = con.cursor()\n cur.execute(\"create table test(i, s)\")\n for i in range(10):\n cur.execute(\"ins...
from __future__ import with_statement from pysqlite2 import dbapi2 as sqlite3 from datetime import datetime, timedelta import time def read_modify_write(): # Open connection and create example schema and data. # In reality, open a database file instead of an in-memory database. con = sqlite3.connect(":memory:") cur = con.cursor() cur.executescript(""" create table test(id integer primary key, data); insert into test(data) values ('foo'); insert into test(data) values ('bar'); insert into test(data) values ('baz'); """) # The read part. There are two ways for fetching data using pysqlite. # 1. "Lazy-reading" # cur.execute("select ...") # for row in cur: # ... # # Advantage: Low memory consumption, good for large resultsets, data is # fetched on demand. # Disadvantage: Database locked as long as you iterate over cursor. # # 2. "Eager reading" # cur.fetchone() to fetch one row # cur.fetchall() to fetch all rows # Advantage: Locks cleared ASAP. # Disadvantage: fetchall() may build large lists. cur.execute("select id, data from test where id=?", (2,)) row = cur.fetchone() # Stupid way to modify the data column. lst = list(row) lst[1] = lst[1] + " & more" # This is the suggested recipe to modify data using pysqlite. We use # pysqlite's proprietary API to use the connection object as a context # manager. This is equivalent to the following code: # # try: # cur.execute("...") # except: # con.rollback() # raise # finally: # con.commit() # # This makes sure locks are cleared - either by commiting or rolling back # the transaction. # # If the rollback happens because of concurrency issues, you just have to # try again until it succeeds. Much more likely is that the rollback and # the raised exception happen because of other reasons, though (constraint # violation, etc.) - don't forget to roll back on errors. # # Or use this recipe. It's useful and gets everything done in two lines of # code. with con: cur.execute("update test set data=? 
where id=?", (lst[1], lst[0])) def delete_older_than(): # Use detect_types if you want to use date/time types in pysqlite. con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_COLNAMES) cur = con.cursor() # With "DEFAULT current_timestamp" we have SQLite fill the timestamp column # automatically. cur.executescript(""" create table test(id integer primary key, data, created timestamp default current_timestamp); """) with con: for i in range(3): cur.execute("insert into test(data) values ('foo')") time.sleep(1) # Delete older than certain interval # SQLite uses UTC time, so we need to create these timestamps in Python, too. with con: delete_before = datetime.utcnow() - timedelta(seconds=2) cur.execute("delete from test where created < ?", (delete_before,)) def modify_insert(): # Use a unique index and the REPLACE command to have the "insert if not # there, but modify if it is there" pattern. Race conditions are taken care # of by transactions. con = sqlite3.connect(":memory:") cur = con.cursor() cur.executescript(""" create table test(id integer primary key, name, age); insert into test(name, age) values ('Adam', 18); insert into test(name, age) values ('Eve', 21); create unique index idx_test_data_unique on test(name); """) with con: # Make Adam age 19 cur.execute("replace into test(name, age) values ('Adam', 19)") # Create new entry cur.execute("replace into test(name, age) values ('Abel', 3)") if __name__ == "__main__": read_modify_write() delete_older_than() modify_insert()
[ [ 1, 0, 0.0091, 0.0091, 0, 0.66, 0, 777, 0, 1, 0, 0, 777, 0, 0 ], [ 1, 0, 0.0182, 0.0091, 0, 0.66, 0.1429, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 1, 0, 0.0273, 0.0091, 0, ...
[ "from __future__ import with_statement", "from pysqlite2 import dbapi2 as sqlite3", "from datetime import datetime, timedelta", "import time", "def read_modify_write():\n # Open connection and create example schema and data.\n # In reality, open a database file instead of an in-memory database.\n c...
from pysqlite2 import dbapi2 as sqlite3 Cursor = sqlite3.Cursor class EagerCursor(Cursor): def __init__(self, con): Cursor.__init__(self, con) self.rows = [] self.pos = 0 def execute(self, *args): sqlite3.Cursor.execute(self, *args) self.rows = Cursor.fetchall(self) self.pos = 0 def fetchone(self): try: row = self.rows[self.pos] self.pos += 1 return row except IndexError: return None def fetchmany(self, num=None): if num is None: num = self.arraysize result = self.rows[self.pos:self.pos+num] self.pos += num return result def fetchall(self): result = self.rows[self.pos:] self.pos = len(self.rows) return result def test(): con = sqlite3.connect(":memory:") cur = con.cursor(EagerCursor) cur.execute("create table test(foo)") cur.executemany("insert into test(foo) values (?)", [(3,), (4,), (5,)]) cur.execute("select * from test") print cur.fetchone() print cur.fetchone() print cur.fetchone() print cur.fetchone() print cur.fetchone() if __name__ == "__main__": test()
[ [ 1, 0, 0.0196, 0.0196, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.0588, 0.0196, 0, 0.66, 0.25, 647, 7, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.3922, 0.6078, 0, 0.6...
[ "from pysqlite2 import dbapi2 as sqlite3", "Cursor = sqlite3.Cursor", "class EagerCursor(Cursor):\n def __init__(self, con):\n Cursor.__init__(self, con)\n self.rows = []\n self.pos = 0\n\n def execute(self, *args):\n sqlite3.Cursor.execute(self, *args)", " def __init__(se...
#!/usr/bin/env python # # Cross-compile and build pysqlite installers for win32 on Linux or Mac OS X. # # The way this works is very ugly, but hey, it *works*! And I didn't have to # reinvent the wheel using NSIS. import os import sys import urllib import zipfile from setup import get_amalgamation # Cross-compiler if sys.platform == "darwin": CC = "/usr/local/i386-mingw32-4.3.0/bin/i386-mingw32-gcc" LIBDIR = "lib.macosx-10.6-i386-2.5" STRIP = "/usr/local/i386-mingw32-4.3.0/bin/i386-mingw32-gcc --strip-all" else: CC = "/usr/bin/i586-mingw32msvc-gcc" LIBDIR = "lib.linux-i686-2.5" STRIP = "strip --strip-all" # Optimization settings OPT = "-O2" # pysqlite sources + SQLite amalgamation SRC = "src/module.c src/connection.c src/cursor.c src/cache.c src/microprotocols.c src/prepare_protocol.c src/statement.c src/util.c src/row.c amalgamation/sqlite3.c" # You will need to fetch these from # https://pyext-cross.pysqlite.googlecode.com/hg/ CROSS_TOOLS = "../pysqlite-pyext-cross" def execute(cmd): print cmd return os.system(cmd) def compile_module(pyver): VER = pyver.replace(".", "") INC = "%s/python%s/include" % (CROSS_TOOLS, VER) vars = locals() vars.update(globals()) cmd = '%(CC)s -mno-cygwin %(OPT)s -mdll -DMODULE_NAME=\\"pysqlite2._sqlite\\" -DSQLITE_ENABLE_RTREE=1 -DSQLITE_ENABLE_FTS3=1 -I amalgamation -I %(INC)s -I . %(SRC)s -L %(CROSS_TOOLS)s/python%(VER)s/libs -lpython%(VER)s -o build/%(LIBDIR)s/pysqlite2/_sqlite.pyd' % vars execute(cmd) execute("%(STRIP)s build/%(LIBDIR)s/pysqlite2/_sqlite.pyd" % vars) def main(): vars = locals() vars.update(globals()) get_amalgamation() for ver in ["2.5", "2.6", "2.7"]: execute("rm -rf build") # First, compile the host version. This is just to get the .py files in place. execute("python2.5 setup.py build") # Yes, now delete the host extension module. What a waste of time. os.unlink("build/%(LIBDIR)s/pysqlite2/_sqlite.so" % vars) # Cross-compile win32 extension module. compile_module(ver) # Prepare for target Python version. 
libdir_ver = LIBDIR[:-3] + ver os.rename("build/%(LIBDIR)s" % vars, "build/" + libdir_ver) # And create the installer! os.putenv("PYEXT_CROSS", CROSS_TOOLS) execute("python2.5 setup.py cross_bdist_wininst --skip-build --target-version=" + ver) if __name__ == "__main__": main()
[ [ 1, 0, 0.1176, 0.0147, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.1324, 0.0147, 0, 0.66, 0.0833, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.1471, 0.0147, 0, ...
[ "import os", "import sys", "import urllib", "import zipfile", "from setup import get_amalgamation", "if sys.platform == \"darwin\":\n CC = \"/usr/local/i386-mingw32-4.3.0/bin/i386-mingw32-gcc\"\n LIBDIR = \"lib.macosx-10.6-i386-2.5\"\n STRIP = \"/usr/local/i386-mingw32-4.3.0/bin/i386-mingw32-gcc ...
# -*- coding: utf-8 -*- # # pysqlite documentation build configuration file, created by # sphinx-quickstart.py on Sat Mar 22 02:47:54 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default value; values that are commented out # serve to show the default value. import sys # If your extensions are in another directory, add it here. #sys.path.append('some/directory') # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.addons.*') or your custom ones. #extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'pysqlite' copyright = u'2008-2009, Gerhard Häring' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # The short X.Y version. version = '2.6' # The full version, including alpha/beta/rc tags. release = '2.6.0' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. 
#show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'default.css' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['.static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Content template for the index page. #html_index = '' # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If true, the reST sources are included in the HTML build as _sources/<name>. #html_copy_source = True # Output file base name for HTML help builder. htmlhelp_basename = 'pysqlitedoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). #latex_documents = [] # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True
[ [ 1, 0, 0.1061, 0.0076, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.2045, 0.0076, 0, 0.66, 0.0769, 518, 0, 0, 0, 0, 0, 5, 0 ], [ 14, 0, 0.2273, 0.0076, 0, ...
[ "import sys", "templates_path = ['.templates']", "source_suffix = '.rst'", "master_doc = 'index'", "project = 'pysqlite'", "copyright = u'2008-2009, Gerhard Häring'", "version = '2.6'", "release = '2.6.0'", "today_fmt = '%B %d, %Y'", "pygments_style = 'sphinx'", "html_style = 'default.css'", "...
from pysqlite2 import dbapi2 as sqlite3 FIELD_MAX_WIDTH = 20 TABLE_NAME = 'people' SELECT = 'select * from %s order by age, name_last' % TABLE_NAME con = sqlite3.connect("mydb") cur = con.cursor() cur.execute(SELECT) # Print a header. for fieldDesc in cur.description: print fieldDesc[0].ljust(FIELD_MAX_WIDTH) , print # Finish the header with a newline. print '-' * 78 # For each row, print the value of each field left-justified within # the maximum possible width of that field. fieldIndices = range(len(cur.description)) for row in cur: for fieldIndex in fieldIndices: fieldValue = str(row[fieldIndex]) print fieldValue.ljust(FIELD_MAX_WIDTH) , print # Finish the row with a newline.
[ [ 1, 0, 0.5, 0.5, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ] ]
[ "from pysqlite2 import dbapi2 as sqlite3" ]
from pysqlite2 import dbapi2 as sqlite3 def progress(): print "Query still executing. Please wait ..." con = sqlite3.connect(":memory:") con.execute("create table test(x)") # Let's create some data con.executemany("insert into test(x) values (?)", [(x,) for x in xrange(300)]) # A progress handler, executed every 10 million opcodes con.set_progress_handler(progress, 10000000) # A particularly long-running query killer_stament = """ select count(*) from ( select t1.x from test t1, test t2, test t3 ) """ con.execute(killer_stament) print "-" * 50 # Clear the progress handler con.set_progress_handler(None, 0) con.execute(killer_stament)
[ [ 1, 0, 0.0345, 0.0345, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 2, 0, 0.1207, 0.069, 0, 0.66, 0.1, 82, 0, 0, 0, 0, 0, 0, 1 ], [ 8, 1, 0.1379, 0.0345, 1, 0.68, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "def progress():\n print(\"Query still executing. Please wait ...\")", " print(\"Query still executing. Please wait ...\")", "con = sqlite3.connect(\":memory:\")", "con.execute(\"create table test(x)\")", "con.executemany(\"insert into test(x) values (?)\",...
from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect("mydb") cur = con.cursor() SELECT = "select name_last, age from people order by age, name_last" # 1. Iterate over the rows available from the cursor, unpacking the # resulting sequences to yield their elements (name_last, age): cur.execute(SELECT) for (name_last, age) in cur: print '%s is %d years old.' % (name_last, age) # 2. Equivalently: cur.execute(SELECT) for row in cur: print '%s is %d years old.' % (row[0], row[1])
[ [ 1, 0, 0.0588, 0.0588, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.1765, 0.0588, 0, 0.66, 0.1429, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.2941, 0.0588, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\"mydb\")", "cur = con.cursor()", "SELECT = \"select name_last, age from people order by age, name_last\"", "cur.execute(SELECT)", "for (name_last, age) in cur:\n print('%s is %d years old.' % (name_last, age))", " print('%s is %d y...
from pysqlite2 import dbapi2 as sqlite3 # Create a connection to the database file "mydb": con = sqlite3.connect("mydb") # Get a Cursor object that operates in the context of Connection con: cur = con.cursor() # Execute the SELECT statement: cur.execute("select * from people order by age") # Retrieve all rows as a sequence and print that sequence: print cur.fetchall()
[ [ 1, 0, 0.0769, 0.0769, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.3077, 0.0769, 0, 0.66, 0.25, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.5385, 0.0769, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\"mydb\")", "cur = con.cursor()", "cur.execute(\"select * from people order by age\")", "print(cur.fetchall())" ]
from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect("mydb")
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 1, 0.3333, 0, 0.66, 1, 761, 3, 1, 0, 0, 242, 10, 1 ] ]
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\"mydb\")" ]
from pysqlite2 import dbapi2 as sqlite3 class MySum: def __init__(self): self.count = 0 def step(self, value): self.count += value def finalize(self): return self.count con = sqlite3.connect(":memory:") con.create_aggregate("mysum", 1, MySum) cur = con.cursor() cur.execute("create table test(i)") cur.execute("insert into test(i) values (1)") cur.execute("insert into test(i) values (2)") cur.execute("select mysum(i) from test") print cur.fetchone()[0]
[ [ 1, 0, 0.05, 0.05, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 3, 0, 0.35, 0.45, 0, 0.66, 0.1111, 865, 0, 3, 0, 0, 0, 0, 0 ], [ 2, 1, 0.225, 0.1, 1, 0.23, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "class MySum:\n def __init__(self):\n self.count = 0\n\n def step(self, value):\n self.count += value\n\n def finalize(self):", " def __init__(self):\n self.count = 0", " self.count = 0", " def step(self, value):\n ...
# A minimal SQLite shell for experiments from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect(":memory:") con.isolation_level = None cur = con.cursor() buffer = "" print "Enter your SQL commands to execute in SQLite." print "Enter a blank line to exit." while True: line = raw_input() if line == "": break buffer += line if sqlite3.complete_statement(buffer): try: buffer = buffer.strip() cur.execute(buffer) if buffer.lstrip().upper().startswith("SELECT"): print cur.fetchall() except sqlite3.Error, e: print "An error occurred:", e.args[0] buffer = "" con.close()
[ [ 1, 0, 0.5, 0.5, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ] ]
[ "from pysqlite2 import dbapi2 as sqlite3" ]
from pysqlite2 import dbapi2 as sqlite3 def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d con = sqlite3.connect(":memory:") con.row_factory = dict_factory cur = con.cursor() cur.execute("select 1 as a") print cur.fetchone()["a"]
[ [ 1, 0, 0.0769, 0.0769, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 2, 0, 0.3846, 0.3846, 0, 0.66, 0.1667, 131, 0, 2, 1, 0, 0, 0, 1 ], [ 14, 1, 0.3077, 0.0769, 1, 0...
[ "from pysqlite2 import dbapi2 as sqlite3", "def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", " d = {}", " for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]", " d[col[0]] = row[...
from pysqlite2 import dbapi2 as sqlite3 import datetime con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) cur = con.cursor() cur.execute("create table test(d date, ts timestamp)") today = datetime.date.today() now = datetime.datetime.now() cur.execute("insert into test(d, ts) values (?, ?)", (today, now)) cur.execute("select d, ts from test") row = cur.fetchone() print today, "=>", row[0], type(row[0]) print now, "=>", row[1], type(row[1]) cur.execute('select current_date as "d [date]", current_timestamp as "ts [timestamp]"') row = cur.fetchone() print "current_date", row[0], type(row[0]) print "current_timestamp", row[1], type(row[1])
[ [ 1, 0, 0.05, 0.05, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 1, 0, 0.1, 0.05, 0, 0.66, 0.0667, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 14, 0, 0.2, 0.05, 0, 0.66, 0.1...
[ "from pysqlite2 import dbapi2 as sqlite3", "import datetime", "con = sqlite3.connect(\":memory:\", detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)", "cur = con.cursor()", "cur.execute(\"create table test(d date, ts timestamp)\")", "today = datetime.date.today()", "now = datetime.datetime.no...
from pysqlite2 import dbapi2 as sqlite3 def authorizer_callback(action, arg1, arg2, dbname, source): if action != sqlite3.SQLITE_SELECT: return sqlite3.SQLITE_DENY if arg1 == "private_table": return sqlite3.SQLITE_DENY return sqlite3.SQLITE_OK con = sqlite3.connect(":memory:") con.executescript(""" create table public_table(c1, c2); create table private_table(c1, c2); """) con.set_authorizer(authorizer_callback) try: con.execute("select * from private_table") except sqlite3.DatabaseError, e: print "SELECT FROM private_table =>", e.args[0] # access ... prohibited try: con.execute("insert into public_table(c1, c2) values (1, 2)") except sqlite3.DatabaseError, e: print "DML command =>", e.args[0] # access ... prohibited
[ [ 1, 0, 0.1111, 0.1111, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 2, 0, 0.6111, 0.6667, 0, 0.66, 1, 423, 0, 5, 1, 0, 0, 0, 0 ], [ 4, 1, 0.5, 0.2222, 1, 0.03, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "def authorizer_callback(action, arg1, arg2, dbname, source):\n if action != sqlite3.SQLITE_SELECT:\n return sqlite3.SQLITE_DENY\n if arg1 == \"private_table\":\n return sqlite3.SQLITE_DENY\n return sqlite3.SQLITE_OK", " if action != sqlite3.S...
from pysqlite2 import dbapi2 as sqlite3 # The shared cache is only available in SQLite versions 3.3.3 or later # See the SQLite documentaton for details. sqlite3.enable_shared_cache(True)
[ [ 1, 0, 0.1667, 0.1667, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 8, 0, 1, 0.1667, 0, 0.66, 1, 965, 3, 1, 0, 0, 0, 0, 1 ] ]
[ "from pysqlite2 import dbapi2 as sqlite3", "sqlite3.enable_shared_cache(True)" ]
from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect(":memory:")
[ [ 1, 0, 0.3333, 0.3333, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 1, 0.3333, 0, 0.66, 1, 761, 3, 1, 0, 0, 242, 10, 1 ] ]
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\":memory:\")" ]
from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect("mydb") cur = con.cursor() who = "Yeltsin" age = 72 cur.execute("select name_last, age from people where name_last=? and age=?", (who, age)) print cur.fetchone()
[ [ 1, 0, 0.0909, 0.0909, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.2727, 0.0909, 0, 0.66, 0.1667, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.4545, 0.0909, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\"mydb\")", "cur = con.cursor()", "who = \"Yeltsin\"", "age = 72", "cur.execute(\"select name_last, age from people where name_last=? and age=?\", (who, age))", "print(cur.fetchone())" ]
from __future__ import with_statement from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect(":memory:") con.execute("create table person (id integer primary key, firstname varchar unique)") # Successful, con.commit() is called automatically afterwards with con: con.execute("insert into person(firstname) values (?)", ("Joe",)) # con.rollback() is called after the with block finishes with an exception, the # exception is still raised and must be catched try: with con: con.execute("insert into person(firstname) values (?)", ("Joe",)) except sqlite3.IntegrityError: print "couldn't add Joe twice"
[ [ 1, 0, 0.0526, 0.0526, 0, 0.66, 0, 777, 0, 1, 0, 0, 777, 0, 0 ], [ 1, 0, 0.1053, 0.0526, 0, 0.66, 0.25, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.2105, 0.0526, 0, 0...
[ "from __future__ import with_statement", "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\":memory:\")", "con.execute(\"create table person (id integer primary key, firstname varchar unique)\")", " con.execute(\"insert into person(firstname) values (?)\", (\"Joe\",))", "try:\n with...
from pysqlite2 import dbapi2 as sqlite3 import md5 def md5sum(t): return md5.md5(t).hexdigest() con = sqlite3.connect(":memory:") con.create_function("md5", 1, md5sum) cur = con.cursor() cur.execute("select md5(?)", ("foo",)) print cur.fetchone()[0]
[ [ 1, 0, 0.0909, 0.0909, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 1, 0, 0.1818, 0.0909, 0, 0.66, 0.1429, 604, 0, 1, 0, 0, 604, 0, 0 ], [ 2, 0, 0.4091, 0.1818, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "import md5", "def md5sum(t):\n return md5.md5(t).hexdigest()", " return md5.md5(t).hexdigest()", "con = sqlite3.connect(\":memory:\")", "con.create_function(\"md5\", 1, md5sum)", "cur = con.cursor()", "cur.execute(\"select md5(?)\", (\"foo\",))", "pr...
from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect("mydb") con.row_factory = sqlite3.Row cur = con.cursor() cur.execute("select name_last, age from people") for row in cur: assert row[0] == row["name_last"] assert row["name_last"] == row["nAmE_lAsT"] assert row[1] == row["age"] assert row[1] == row["AgE"]
[ [ 1, 0, 0.0833, 0.0833, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.25, 0.0833, 0, 0.66, 0.2, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.3333, 0.0833, 0, 0....
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\"mydb\")", "con.row_factory = sqlite3.Row", "cur = con.cursor()", "cur.execute(\"select name_last, age from people\")", "for row in cur:\n assert row[0] == row[\"name_last\"]\n assert row[\"name_last\"] == row[\"nAmE_lAsT\"]\n ass...
from pysqlite2 import dbapi2 as sqlite3 class Point(object): def __init__(self, x, y): self.x, self.y = x, y def adapt_point(point): return "%f;%f" % (point.x, point.y) sqlite3.register_adapter(Point, adapt_point) con = sqlite3.connect(":memory:") cur = con.cursor() p = Point(4.0, -3.2) cur.execute("select ?", (p,)) print cur.fetchone()[0]
[ [ 1, 0, 0.0588, 0.0588, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 3, 0, 0.2353, 0.1765, 0, 0.66, 0.125, 652, 0, 1, 0, 0, 186, 0, 0 ], [ 2, 1, 0.2647, 0.1176, 1, 0...
[ "from pysqlite2 import dbapi2 as sqlite3", "class Point(object):\n def __init__(self, x, y):\n self.x, self.y = x, y", " def __init__(self, x, y):\n self.x, self.y = x, y", " self.x, self.y = x, y", "def adapt_point(point):\n return \"%f;%f\" % (point.x, point.y)", " retur...
from pysqlite2 import dbapi2 as sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()

conn.execute("create table person(lastname, firstname)")

AUSTRIA = u"\xd6sterreich"

# Default behaviour: text comes back as unicode.
cursor.execute("select ?", (AUSTRIA,))
record = cursor.fetchone()
assert record[0] == AUSTRIA

# Force bytestrings instead ...
conn.text_factory = str
cursor.execute("select ?", (AUSTRIA,))
record = cursor.fetchone()
assert type(record[0]) == str
# ... which are UTF-8 encoded, unless the database holds garbage.
assert record[0] == AUSTRIA.encode("utf-8")

# A custom text_factory: here one that silently drops bytes that are not
# valid UTF-8 instead of raising.
conn.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor.execute("select ?", ("this is latin1 and would normally create errors" +
    u"\xe4\xf6\xfc".encode("latin1"),))
record = cursor.fetchone()
assert type(record[0]) == unicode

# OptimizedUnicode: bytestrings for pure-ASCII data, unicode otherwise.
conn.text_factory = sqlite3.OptimizedUnicode
cursor.execute("select ?", (AUSTRIA,))
record = cursor.fetchone()
assert type(record[0]) == unicode

cursor.execute("select ?", ("Germany",))
record = cursor.fetchone()
assert type(record[0]) == str
[ [ 1, 0, 0.0238, 0.0238, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.0714, 0.0238, 0, 0.66, 0.0588, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.0952, 0.0238, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\":memory:\")", "cur = con.cursor()", "con.execute(\"create table person(lastname, firstname)\")", "AUSTRIA = u\"\\xd6sterreich\"", "cur.execute(\"select ?\", (AUSTRIA,))", "row = cur.fetchone()", "con.text_factory = str", "cur.execut...
from pysqlite2 import dbapi2 as sqlite3 class CountCursorsConnection(sqlite3.Connection): def __init__(self, *args, **kwargs): sqlite3.Connection.__init__(self, *args, **kwargs) self.numcursors = 0 def cursor(self, *args, **kwargs): self.numcursors += 1 return sqlite3.Connection.cursor(self, *args, **kwargs) con = sqlite3.connect(":memory:", factory=CountCursorsConnection) cur1 = con.cursor() cur2 = con.cursor() print con.numcursors
[ [ 1, 0, 0.0667, 0.0667, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 3, 0, 0.4333, 0.5333, 0, 0.66, 0.2, 293, 0, 2, 0, 0, 809, 0, 2 ], [ 2, 1, 0.3333, 0.2, 1, 0.79, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "class CountCursorsConnection(sqlite3.Connection):\n def __init__(self, *args, **kwargs):\n sqlite3.Connection.__init__(self, *args, **kwargs)\n self.numcursors = 0\n\n def cursor(self, *args, **kwargs):\n self.numcursors += 1\n return s...
from pysqlite2 import dbapi2 as sqlite3 def collate_reverse(string1, string2): return -cmp(string1, string2) con = sqlite3.connect(":memory:") con.create_collation("reverse", collate_reverse) cur = con.cursor() cur.execute("create table test(x)") cur.executemany("insert into test(x) values (?)", [("a",), ("b",)]) cur.execute("select x from test order by x collate reverse") for row in cur: print row con.close()
[ [ 1, 0, 0.0667, 0.0667, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 2, 0, 0.2333, 0.1333, 0, 0.66, 0.1111, 722, 0, 2, 1, 0, 0, 0, 1 ], [ 13, 1, 0.2667, 0.0667, 1, 0...
[ "from pysqlite2 import dbapi2 as sqlite3", "def collate_reverse(string1, string2):\n return -cmp(string1, string2)", " return -cmp(string1, string2)", "con = sqlite3.connect(\":memory:\")", "con.create_collation(\"reverse\", collate_reverse)", "cur = con.cursor()", "cur.execute(\"create table test...
from pysqlite2 import dbapi2 as sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()

# executescript() runs several SQL statements in a single call.
cursor.executescript("""
    create table person(
        firstname,
        lastname,
        age
    );

    create table book(
        title,
        author,
        published
    );

    insert into book(title, author, published)
    values (
        'Dirk Gently''s Holistic Detective Agency',
        'Douglas Adams',
        1987
    );
    """)
[ [ 1, 0, 0.0417, 0.0417, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.125, 0.0417, 0, 0.66, 0.3333, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.1667, 0.0417, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\":memory:\")", "cur = con.cursor()", "cur.executescript(\"\"\"\n create table person(\n firstname,\n lastname,\n age\n );\n\n create table book(" ]
from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect(":memory:") # enable extension loading con.enable_load_extension(True) # Load the fulltext search extension con.execute("select load_extension('./fts3.so')") # alternatively you can load the extension using an API call: # con.load_extension("./fts3.so") # disable extension laoding again con.enable_load_extension(False) # example from SQLite wiki con.execute("create virtual table recipe using fts3(name, ingredients)") con.executescript(""" insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes'); insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery'); insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour'); insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter'); """) for row in con.execute("select rowid, name, ingredients from recipe where name match 'pie'"): print row
[ [ 1, 0, 0.0357, 0.0357, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.1071, 0.0357, 0, 0.66, 0.1429, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 8, 0, 0.2143, 0.0357, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\":memory:\")", "con.enable_load_extension(True)", "con.execute(\"select load_extension('./fts3.so')\")", "con.enable_load_extension(False)", "con.execute(\"create virtual table recipe using fts3(name, ingredients)\")", "con.executescript...
from pysqlite2 import dbapi2 as sqlite3 persons = [ ("Hugo", "Boss"), ("Calvin", "Klein") ] con = sqlite3.connect(":memory:") # Create the table con.execute("create table person(firstname, lastname)") # Fill the table con.executemany("insert into person(firstname, lastname) values (?, ?)", persons) # Print the table contents for row in con.execute("select firstname, lastname from person"): print row # Using a dummy WHERE clause to not let SQLite take the shortcut table deletes. print "I just deleted", con.execute("delete from person where 1=1").rowcount, "rows"
[ [ 1, 0, 0.0476, 0.0476, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.2143, 0.1905, 0, 0.66, 0.1667, 70, 0, 0, 0, 0, 0, 5, 0 ], [ 14, 0, 0.381, 0.0476, 0, 0....
[ "from pysqlite2 import dbapi2 as sqlite3", "persons = [\n (\"Hugo\", \"Boss\"),\n (\"Calvin\", \"Klein\")\n ]", "con = sqlite3.connect(\":memory:\")", "con.execute(\"create table person(firstname, lastname)\")", "con.executemany(\"insert into person(firstname, lastname) values (?, ?)\", persons)", ...
from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect("mydb") cur = con.cursor() who = "Yeltsin" age = 72 cur.execute("select name_last, age from people where name_last=:who and age=:age", locals()) print cur.fetchone()
[ [ 1, 0, 0.0833, 0.0833, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.25, 0.0833, 0, 0.66, 0.1667, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.4167, 0.0833, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\"mydb\")", "cur = con.cursor()", "who = \"Yeltsin\"", "age = 72", "cur.execute(\"select name_last, age from people where name_last=:who and age=:age\",\n locals())", "print(cur.fetchone())" ]
# Not referenced from the documentation, but builds the database file the other
# code snippets expect.
from pysqlite2 import dbapi2 as sqlite3
import os

DB_FILE = "mydb"

# Start from a clean slate.
if os.path.exists(DB_FILE):
    os.remove(DB_FILE)

conn = sqlite3.connect(DB_FILE)
cursor = conn.cursor()
cursor.execute("""
    create table people
    (
      name_last      varchar(20),
      age            integer
    )
    """)
cursor.execute("insert into people (name_last, age) values ('Yeltsin', 72)")
cursor.execute("insert into people (name_last, age) values ('Putin', 51)")
conn.commit()

cursor.close()
conn.close()
[ [ 1, 0, 0.1429, 0.0357, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 1, 0, 0.1786, 0.0357, 0, 0.66, 0.0909, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 14, 0, 0.25, 0.0357, 0, 0...
[ "from pysqlite2 import dbapi2 as sqlite3", "import os", "DB_FILE = \"mydb\"", "if os.path.exists(DB_FILE):\n os.remove(DB_FILE)", " os.remove(DB_FILE)", "con = sqlite3.connect(DB_FILE)", "cur = con.cursor()", "cur.execute(\"\"\"\n create table people\n (\n name_last v...
from pysqlite2 import dbapi2 as sqlite3 con = sqlite3.connect("mydb") cur = con.cursor() who = "Yeltsin" age = 72 cur.execute("select name_last, age from people where name_last=:who and age=:age", {"who": who, "age": age}) print cur.fetchone()
[ [ 1, 0, 0.0833, 0.0833, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.25, 0.0833, 0, 0.66, 0.1667, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.4167, 0.0833, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\"mydb\")", "cur = con.cursor()", "who = \"Yeltsin\"", "age = 72", "cur.execute(\"select name_last, age from people where name_last=:who and age=:age\",\n {\"who\": who, \"age\": age})", "print(cur.fetchone())" ]
from pysqlite2 import dbapi2 as sqlite3 import datetime con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_COLNAMES) cur = con.cursor() cur.execute('select ? as "x [timestamp]"', (datetime.datetime.now(),)) dt = cur.fetchone()[0] print dt, type(dt)
[ [ 1, 0, 0.125, 0.125, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 1, 0, 0.25, 0.125, 0, 0.66, 0.1667, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 14, 0, 0.5, 0.125, 0, 0.66, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "import datetime", "con = sqlite3.connect(\":memory:\", detect_types=sqlite3.PARSE_COLNAMES)", "cur = con.cursor()", "cur.execute('select ? as \"x [timestamp]\"', (datetime.datetime.now(),))", "dt = cur.fetchone()[0]", "print(dt, type(dt))" ]
from pysqlite2 import dbapi2 as sqlite3 def char_generator(): import string for c in string.letters[:26]: yield (c,) con = sqlite3.connect(":memory:") cur = con.cursor() cur.execute("create table characters(c)") cur.executemany("insert into characters(c) values (?)", char_generator()) cur.execute("select c from characters") print cur.fetchall()
[ [ 1, 0, 0.0667, 0.0667, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 2, 0, 0.3, 0.2667, 0, 0.66, 0.1429, 806, 0, 0, 0, 0, 0, 0, 0 ], [ 1, 1, 0.2667, 0.0667, 1, 0.41,...
[ "from pysqlite2 import dbapi2 as sqlite3", "def char_generator():\n import string\n for c in string.letters[:26]:\n yield (c,)", " import string", " for c in string.letters[:26]:\n yield (c,)", " yield (c,)", "con = sqlite3.connect(\":memory:\")", "cur = con.cursor()", ...
from pysqlite2 import dbapi2 as sqlite3

conn = sqlite3.connect("mydb")
cursor = conn.cursor()

newPeople = (
    ('Lebed', 53),
    ('Zhirinovsky', 57),
    )

for person in newPeople:
    cursor.execute("insert into people (name_last, age) values (?, ?)", person)

# Nothing is persisted until the transaction is committed explicitly.
conn.commit()
[ [ 1, 0, 0.0625, 0.0625, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 14, 0, 0.1875, 0.0625, 0, 0.66, 0.2, 761, 3, 1, 0, 0, 242, 10, 1 ], [ 14, 0, 0.3125, 0.0625, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "con = sqlite3.connect(\"mydb\")", "cur = con.cursor()", "newPeople = (\n ('Lebed' , 53),\n ('Zhirinovsky' , 57),\n )", "for person in newPeople:\n cur.execute(\"insert into people (name_last, age) values (?, ?)\", person)", " cur.execute(\"ins...
from pysqlite2 import dbapi2 as sqlite3 class Point(object): def __init__(self, x, y): self.x, self.y = x, y def __conform__(self, protocol): if protocol is sqlite3.PrepareProtocol: return "%f;%f" % (self.x, self.y) con = sqlite3.connect(":memory:") cur = con.cursor() p = Point(4.0, -3.2) cur.execute("select ?", (p,)) print cur.fetchone()[0]
[ [ 1, 0, 0.0625, 0.0625, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 3, 0, 0.375, 0.4375, 0, 0.66, 0.1667, 652, 0, 2, 0, 0, 186, 0, 0 ], [ 2, 1, 0.2812, 0.125, 1, 0....
[ "from pysqlite2 import dbapi2 as sqlite3", "class Point(object):\n def __init__(self, x, y):\n self.x, self.y = x, y\n\n def __conform__(self, protocol):\n if protocol is sqlite3.PrepareProtocol:\n return \"%f;%f\" % (self.x, self.y)", " def __init__(self, x, y):\n self....
from pysqlite2 import dbapi2 as sqlite3 class Point(object): def __init__(self, x, y): self.x, self.y = x, y def __repr__(self): return "(%f;%f)" % (self.x, self.y) def adapt_point(point): return "%f;%f" % (point.x, point.y) def convert_point(s): x, y = map(float, s.split(";")) return Point(x, y) # Register the adapter sqlite3.register_adapter(Point, adapt_point) # Register the converter sqlite3.register_converter("point", convert_point) p = Point(4.0, -3.2) ######################### # 1) Using declared types con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES) cur = con.cursor() cur.execute("create table test(p point)") cur.execute("insert into test(p) values (?)", (p,)) cur.execute("select p from test") print "with declared types:", cur.fetchone()[0] cur.close() con.close() ####################### # 1) Using column names con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_COLNAMES) cur = con.cursor() cur.execute("create table test(p)") cur.execute("insert into test(p) values (?)", (p,)) cur.execute('select p as "p [point]" from test') print "with column names:", cur.fetchone()[0] cur.close() con.close()
[ [ 1, 0, 0.0213, 0.0213, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 3, 0, 0.117, 0.1277, 0, 0.66, 0.0455, 652, 0, 2, 0, 0, 186, 0, 0 ], [ 2, 1, 0.0957, 0.0426, 1, 0...
[ "from pysqlite2 import dbapi2 as sqlite3", "class Point(object):\n def __init__(self, x, y):\n self.x, self.y = x, y\n\n def __repr__(self):\n return \"(%f;%f)\" % (self.x, self.y)", " def __init__(self, x, y):\n self.x, self.y = x, y", " self.x, self.y = x, y", " def...
from pysqlite2 import dbapi2 as sqlite3 class IterChars: def __init__(self): self.count = ord('a') def __iter__(self): return self def next(self): if self.count > ord('z'): raise StopIteration self.count += 1 return (chr(self.count - 1),) # this is a 1-tuple con = sqlite3.connect(":memory:") cur = con.cursor() cur.execute("create table characters(c)") theIter = IterChars() cur.executemany("insert into characters(c) values (?)", theIter) cur.execute("select c from characters") print cur.fetchall()
[ [ 1, 0, 0.0417, 0.0417, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 3, 0, 0.3542, 0.5, 0, 0.66, 0.125, 192, 0, 3, 0, 0, 0, 0, 3 ], [ 2, 1, 0.1875, 0.0833, 1, 0.87, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "class IterChars:\n def __init__(self):\n self.count = ord('a')\n\n def __iter__(self):\n return self\n\n def next(self):", " def __init__(self):\n self.count = ord('a')", " self.count = ord('a')", " def __iter__(self):\n ...
from pysqlite2 import dbapi2 as sqlite3
import apsw

apsw_con = apsw.Connection(":memory:")
apsw_con.createscalarfunction("times_two", lambda x: 2*x, 1)

# pysqlite can wrap an existing APSW connection directly; the SQL function
# registered above stays visible.
con = sqlite3.connect(apsw_con)
doubled = con.execute("select times_two(15)").fetchone()[0]
assert doubled == 30
con.close()
[ [ 1, 0, 0.0833, 0.0833, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 1, 0, 0.1667, 0.0833, 0, 0.66, 0.1667, 654, 0, 1, 0, 0, 654, 0, 0 ], [ 14, 0, 0.3333, 0.0833, 0, ...
[ "from pysqlite2 import dbapi2 as sqlite3", "import apsw", "apsw_con = apsw.Connection(\":memory:\")", "apsw_con.createscalarfunction(\"times_two\", lambda x: 2*x, 1)", "con = sqlite3.connect(apsw_con)", "result = con.execute(\"select times_two(15)\").fetchone()[0]", "con.close()" ]
from pysqlite2 import dbapi2 as sqlite3 import datetime, time def adapt_datetime(ts): return time.mktime(ts.timetuple()) sqlite3.register_adapter(datetime.datetime, adapt_datetime) con = sqlite3.connect(":memory:") cur = con.cursor() now = datetime.datetime.now() cur.execute("select ?", (now,)) print cur.fetchone()[0]
[ [ 1, 0, 0.0714, 0.0714, 0, 0.66, 0, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 1, 0, 0.1429, 0.0714, 0, 0.66, 0.125, 426, 0, 2, 0, 0, 426, 0, 0 ], [ 2, 0, 0.3214, 0.1429, 0, 0...
[ "from pysqlite2 import dbapi2 as sqlite3", "import datetime, time", "def adapt_datetime(ts):\n return time.mktime(ts.timetuple())", " return time.mktime(ts.timetuple())", "sqlite3.register_adapter(datetime.datetime, adapt_datetime)", "con = sqlite3.connect(\":memory:\")", "cur = con.cursor()", "...
# Author: Paul Kippes <kippesp@gmail.com> import unittest from pysqlite2 import dbapi2 as sqlite class DumpTests(unittest.TestCase): def setUp(self): self.cx = sqlite.connect(":memory:") self.cu = self.cx.cursor() def tearDown(self): self.cx.close() def CheckTableDump(self): expected_sqls = [ "CREATE TABLE t1(id integer primary key, s1 text, " \ "t1_i1 integer not null, i2 integer, unique (s1), " \ "constraint t1_idx1 unique (i2));" , "INSERT INTO \"t1\" VALUES(1,'foo',10,20);" , "INSERT INTO \"t1\" VALUES(2,'foo2',30,30);" , "CREATE TABLE t2(id integer, t2_i1 integer, " \ "t2_i2 integer, primary key (id)," \ "foreign key(t2_i1) references t1(t1_i1));" , "CREATE TRIGGER trigger_1 update of t1_i1 on t1 " \ "begin " \ "update t2 set t2_i1 = new.t1_i1 where t2_i1 = old.t1_i1; " \ "end;" , "CREATE VIEW v1 as select * from t1 left join t2 " \ "using (id);" ] [self.cu.execute(s) for s in expected_sqls] i = self.cx.iterdump() actual_sqls = [s for s in i] expected_sqls = ['BEGIN TRANSACTION;'] + expected_sqls + \ ['COMMIT;'] [self.assertEqual(expected_sqls[i], actual_sqls[i]) for i in xrange(len(expected_sqls))] def suite(): return unittest.TestSuite(unittest.makeSuite(DumpTests, "Check")) def test(): runner = unittest.TextTestRunner() runner.run(suite()) if __name__ == "__main__": test()
[ [ 1, 0, 0.0577, 0.0192, 0, 0.66, 0, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0769, 0.0192, 0, 0.66, 0.2, 987, 0, 1, 0, 0, 987, 0, 0 ], [ 3, 0, 0.4615, 0.7115, 0, 0.66,...
[ "import unittest", "from pysqlite2 import dbapi2 as sqlite", "class DumpTests(unittest.TestCase):\n def setUp(self):\n self.cx = sqlite.connect(\":memory:\")\n self.cu = self.cx.cursor()\n\n def tearDown(self):\n self.cx.close()", " def setUp(self):\n self.cx = sqlite.conn...
# Mimic the sqlite3 console shell's .dump command # Author: Paul Kippes <kippesp@gmail.com> def _iterdump(connection): """ Returns an iterator to the dump of the database in an SQL text format. Used to produce an SQL dump of the database. Useful to save an in-memory database for later restoration. This function should not be called directly but instead called from the Connection method, iterdump(). """ cu = connection.cursor() yield('BEGIN TRANSACTION;') # sqlite_master table contains the SQL CREATE statements for the database. q = """ SELECT name, type, sql FROM sqlite_master WHERE sql NOT NULL AND type == 'table' """ schema_res = cu.execute(q) for table_name, type, sql in schema_res.fetchall(): if table_name == 'sqlite_sequence': yield('DELETE FROM sqlite_sequence;') elif table_name == 'sqlite_stat1': yield('ANALYZE sqlite_master;') elif table_name.startswith('sqlite_'): continue # NOTE: Virtual table support not implemented #elif sql.startswith('CREATE VIRTUAL TABLE'): # qtable = table_name.replace("'", "''") # yield("INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"\ # "VALUES('table','%s','%s',0,'%s');" % # qtable, # qtable, # sql.replace("''")) else: yield('%s;' % sql) # Build the insert statement for each row of the current table res = cu.execute("PRAGMA table_info('%s')" % table_name) column_names = [str(table_info[1]) for table_info in res.fetchall()] q = "SELECT 'INSERT INTO \"%(tbl_name)s\" VALUES(" q += ",".join(["'||quote(" + col + ")||'" for col in column_names]) q += ")' FROM '%(tbl_name)s'" query_res = cu.execute(q % {'tbl_name': table_name}) for row in query_res: yield("%s;" % row[0]) # Now when the type is 'index', 'trigger', or 'view' q = """ SELECT name, type, sql FROM sqlite_master WHERE sql NOT NULL AND type IN ('index', 'trigger', 'view') """ schema_res = cu.execute(q) for name, type, sql in schema_res.fetchall(): yield('%s;' % sql) yield('COMMIT;')
[ [ 2, 0, 0.5317, 0.9524, 0, 0.66, 0, 339, 0, 1, 0, 0, 0, 0, 11 ], [ 8, 1, 0.127, 0.1111, 1, 0.84, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 1, 0.2063, 0.0159, 1, 0.84, ...
[ "def _iterdump(connection):\n \"\"\"\n Returns an iterator to the dump of the database in an SQL text format.\n\n Used to produce an SQL dump of the database. Useful to save an in-memory\n database for later restoration. This function should not be called\n directly but instead called from the Conn...
#coding:utf8
from bottle import route, run, debug, template, request, validate, error, response, redirect
from y_common import *
from weibopy.auth import WebOAuthHandler
from weibopy import oauth

@route('/login')
def login():
    # Generic login entry point: hand off to the Sina OAuth flow.
    redirect('/sina/login')

@route('/sina/login')
def sina_login():
    # Step 1: get a request token and send the user to Sina to authorize it.
    handler = WebOAuthHandler(sina_consumer_key, sina_consumer_secret)
    authorize_url = handler.get_authorization_url_with_callback(baseurl + "/sina/callback/")
    redirect(authorize_url)

@route('/sina/callback/:request_token')
def sina_callback(request_token):
    # Step 2: exchange the verifier for an access token and stash it in a
    # signed cookie.
    oauth_verifier = request.GET.get('oauth_verifier', None)
    handler = WebOAuthHandler(sina_consumer_key, sina_consumer_secret,
                              oauth.OAuthToken.from_string(request_token))
    token = handler.get_access_token(oauth_verifier)
    response.set_cookie("ybole_auth", token, secret = gng_secret)
    redirect('/')
[ [ 1, 0, 0.087, 0.0435, 0, 0.66, 0, 591, 0, 9, 0, 0, 591, 0, 0 ], [ 1, 0, 0.1304, 0.0435, 0, 0.66, 0.1667, 64, 0, 1, 0, 0, 64, 0, 0 ], [ 1, 0, 0.1739, 0.0435, 0, 0.6...
[ "from bottle import route, run, debug, template, request, validate, error, response, redirect", "from y_common import *", "from weibopy.auth import WebOAuthHandler", "from weibopy import oauth", "def login():\n redirect('/sina/login')", " redirect('/sina/login')", "def sina_login():\n auth = We...
from bottle import route, run, debug, template, request, validate, error, response, redirect

# "Sent applications" endpoints.  All handlers below are placeholders that
# currently just render the home template.

@route('/apply/sent')
@route('/apply/sent/show')
def apply_sent_show():
    # TODO: list the applications this user has sent.
    return template('home')

@route('/apply/sent/add/:tweet_id')
def apply_sent_add(tweet_id):
    # TODO: record a new sent application for tweet_id.
    return template('home')

@route('/apply/sent/exist/:tweet_id')
def apply_sent_exist(tweet_id):
    # TODO: report whether an application for tweet_id already exists.
    return template('home')

@route('/apply/sent/count')
def apply_sent_count():
    # TODO: return the number of sent applications.
    return template('home')

@route('/apply/sent/delete/:tweet_id')
def apply_sent_delete(tweet_id):
    # TODO: delete the application for tweet_id.
    return template('home')
[ [ 1, 0, 0.0455, 0.0455, 0, 0.66, 0, 591, 0, 9, 0, 0, 591, 0, 0 ], [ 2, 0, 0.25, 0.0909, 0, 0.66, 0.2, 409, 0, 0, 1, 0, 0, 0, 3 ], [ 13, 1, 0.2727, 0.0455, 1, 0.31, ...
[ "from bottle import route, run, debug, template, request, validate, error, response, redirect", "def apply_sent_show():\n return template('home')", " return template('home')", "def apply_sent_add(tweet_id):\n return template('home')", " return template('home')", "def apply_sent_exist(tweet_id)...
from bottle import route, run, debug, template, request, validate, error, response, redirect

@route('/')
@route('/home')
def home():
    # Front page; currently just renders the home template.
    return template('home')
    #return 'Ybole - Python backend ... Coming soon!'
[ [ 1, 0, 0.1429, 0.1429, 0, 0.66, 0, 591, 0, 9, 0, 0, 591, 0, 0 ], [ 2, 0, 0.7857, 0.2857, 0, 0.66, 1, 475, 0, 0, 1, 0, 0, 0, 3 ], [ 13, 1, 0.8571, 0.1429, 1, 0.92, ...
[ "from bottle import route, run, debug, template, request, validate, error, response, redirect", "def home():\n return template('home')", " return template('home')" ]
from bottle import route, run, debug, template, request, validate, error, response, redirect

# Admin endpoints; all placeholders rendering the home template for now.

@route('/admin/')
def admin():
    return template("home")

@route('/admin/tag')
def admin_tag():
    return template("home")

@route('/admin/tag/edit')
def admin_tag_edit():
    # BUG FIX: this handler was also named admin_tag, redefining (and
    # shadowing) the '/admin/tag' handler at module level.  Routing still
    # worked because bottle binds at decoration time, but the duplicate name
    # made the first handler unreachable by name.
    return template("home")
[ [ 1, 0, 0.0769, 0.0769, 0, 0.66, 0, 591, 0, 9, 0, 0, 591, 0, 0 ], [ 2, 0, 0.3462, 0.1538, 0, 0.66, 0.3333, 426, 0, 0, 1, 0, 0, 0, 2 ], [ 13, 1, 0.3846, 0.0769, 1, 0...
[ "from bottle import route, run, debug, template, request, validate, error, response, redirect", "def admin():\n return template(\"home\")", " return template(\"home\")", "def admin_tag():\n return template(\"home\")", " return template(\"home\")", "def admin_tag():\n return template(\"home\...
import re, base64, json baseurl = "http://www.ybole.com:81" gng_secret = "HUSTGNGisVeryGelivable" sina_consumer_key= "961495784" sina_consumer_secret ="47d9d806a1dc04cc758be6f7213465bc" def htmlEncode(str): """ Returns the HTML encoded version of the given string. This is useful to display a plain ASCII text string on a web page.""" htmlCodes = [ ['&', '&amp;'], ['<', '&lt;'], ['>', '&gt;'], ['"', '&quot;'], ] for orig, repl in htmlCodes: str = str.replace(orig, repl) return str def jsonencode(x): data = dict(x) return json.dumps(data)
[ [ 1, 0, 0.04, 0.04, 0, 0.66, 0, 540, 0, 3, 0, 0, 540, 0, 0 ], [ 14, 0, 0.12, 0.04, 0, 0.66, 0.1667, 660, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.2, 0.04, 0, 0.66, 0.3...
[ "import re, base64, json", "baseurl = \"http://www.ybole.com:81\"", "gng_secret = \"HUSTGNGisVeryGelivable\"", "sina_consumer_key= \"961495784\"", "sina_consumer_secret =\"47d9d806a1dc04cc758be6f7213465bc\"", "def htmlEncode(str):\n \"\"\" Returns the HTML encoded version of the given string. This is u...
# Copyright 2009-2010 Joshua Roesslein # See LICENSE for details. class WeibopError(Exception): """Weibopy exception""" def __init__(self, reason): try: self.reason = reason.encode('utf-8') except: self.reason = reason def __str__(self): return self.reason
[ [ 3, 0, 0.625, 0.6875, 0, 0.66, 0, 305, 0, 2, 0, 0, 645, 0, 1 ], [ 8, 1, 0.375, 0.0625, 1, 0.81, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 2, 1, 0.625, 0.3125, 1, 0.81, 0....
[ "class WeibopError(Exception):\n \"\"\"Weibopy exception\"\"\"\n\n def __init__(self, reason):\n try:\n self.reason = reason.encode('utf-8')\n except:\n self.reason = reason", " \"\"\"Weibopy exception\"\"\"", " def __init__(self, reason):\n try:\n ...
# Copyright 2009-2010 Joshua Roesslein # See LICENSE for details. from weibopy.utils import parse_datetime, parse_html_value, parse_a_href, \ parse_search_datetime, unescape_html class ResultSet(list): """A list like object that holds results from a Twitter API query.""" class Model(object): def __init__(self, api=None): self._api = api def __getstate__(self): # pickle pickle = dict(self.__dict__) del pickle['_api'] # do not pickle the API reference return pickle @classmethod def parse(cls, api, json): """Parse a JSON object into a model instance.""" raise NotImplementedError @classmethod def parse_list(cls, api, json_list): """Parse a list of JSON objects into a result set of model instances.""" results = ResultSet() for obj in json_list: results.append(cls.parse(api, obj)) return results class Status(Model): @classmethod def parse(cls, api, json): status = cls(api) for k, v in json.items(): if k == 'user': user = User.parse(api, v) setattr(status, 'author', user) setattr(status, 'user', user) # DEPRECIATED elif k == 'screen_name': setattr(status, k, v) elif k == 'created_at': setattr(status, k, parse_datetime(v)) elif k == 'source': if '<' in v: setattr(status, k, parse_html_value(v)) setattr(status, 'source_url', parse_a_href(v)) else: setattr(status, k, v) elif k == 'retweeted_status': setattr(status, k, User.parse(api, v)) elif k == 'geo': setattr(status, k, Geo.parse(api, v)) else: setattr(status, k, v) return status def destroy(self): return self._api.destroy_status(self.id) def retweet(self): return self._api.retweet(self.id) def retweets(self): return self._api.retweets(self.id) def favorite(self): return self._api.create_favorite(self.id) class Geo(Model): @classmethod def parse(cls, api, json): geo = cls(api) if json is not None: for k, v in json.items(): setattr(geo, k, v) return geo class Comments(Model): @classmethod def parse(cls, api, json): comments = cls(api) for k, v in json.items(): if k == 'user': user = User.parse(api, v) setattr(comments, 
'author', user) setattr(comments, 'user', user) elif k == 'status': status = Status.parse(api, v) setattr(comments, 'user', status) elif k == 'created_at': setattr(comments, k, parse_datetime(v)) elif k == 'reply_comment': setattr(comments, k, User.parse(api, v)) else: setattr(comments, k, v) return comments def destroy(self): return self._api.destroy_status(self.id) def retweet(self): return self._api.retweet(self.id) def retweets(self): return self._api.retweets(self.id) def favorite(self): return self._api.create_favorite(self.id) class User(Model): @classmethod def parse(cls, api, json): user = cls(api) for k, v in json.items(): if k == 'created_at': setattr(user, k, parse_datetime(v)) elif k == 'status': setattr(user, k, Status.parse(api, v)) elif k == 'screen_name': setattr(user, k, v) elif k == 'following': # twitter sets this to null if it is false if v is True: setattr(user, k, True) else: setattr(user, k, False) else: setattr(user, k, v) return user @classmethod def parse_list(cls, api, json_list): if isinstance(json_list, list): item_list = json_list else: item_list = json_list['users'] results = ResultSet() for obj in item_list: results.append(cls.parse(api, obj)) return results def timeline(self, **kargs): return self._api.user_timeline(user_id=self.id, **kargs) def friends(self, **kargs): return self._api.friends(user_id=self.id, **kargs) def followers(self, **kargs): return self._api.followers(user_id=self.id, **kargs) def follow(self): self._api.create_friendship(user_id=self.id) self.following = True def unfollow(self): self._api.destroy_friendship(user_id=self.id) self.following = False def lists_memberships(self, *args, **kargs): return self._api.lists_memberships(user=self.screen_name, *args, **kargs) def lists_subscriptions(self, *args, **kargs): return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs) def lists(self, *args, **kargs): return self._api.lists(user=self.screen_name, *args, **kargs) def followers_ids(self, *args, 
**kargs): return self._api.followers_ids(user_id=self.id, *args, **kargs) class DirectMessage(Model): @classmethod def parse(cls, api, json): dm = cls(api) for k, v in json.items(): if k == 'sender' or k == 'recipient': setattr(dm, k, User.parse(api, v)) elif k == 'created_at': setattr(dm, k, parse_datetime(v)) else: setattr(dm, k, v) return dm class Friendship(Model): @classmethod def parse(cls, api, json): source = cls(api) for k, v in json['source'].items(): setattr(source, k, v) # parse target target = cls(api) for k, v in json['target'].items(): setattr(target, k, v) return source, target class SavedSearch(Model): @classmethod def parse(cls, api, json): ss = cls(api) for k, v in json.items(): if k == 'created_at': setattr(ss, k, parse_datetime(v)) else: setattr(ss, k, v) return ss def destroy(self): return self._api.destroy_saved_search(self.id) class SearchResult(Model): @classmethod def parse(cls, api, json): result = cls() for k, v in json.items(): if k == 'created_at': setattr(result, k, parse_search_datetime(v)) elif k == 'source': setattr(result, k, parse_html_value(unescape_html(v))) else: setattr(result, k, v) return result @classmethod def parse_list(cls, api, json_list, result_set=None): results = ResultSet() results.max_id = json_list.get('max_id') results.since_id = json_list.get('since_id') results.refresh_url = json_list.get('refresh_url') results.next_page = json_list.get('next_page') results.results_per_page = json_list.get('results_per_page') results.page = json_list.get('page') results.completed_in = json_list.get('completed_in') results.query = json_list.get('query') for obj in json_list['results']: results.append(cls.parse(api, obj)) return results class List(Model): @classmethod def parse(cls, api, json): lst = List(api) for k,v in json.items(): if k == 'user': setattr(lst, k, User.parse(api, v)) else: setattr(lst, k, v) return lst @classmethod def parse_list(cls, api, json_list, result_set=None): results = ResultSet() for obj in 
json_list['lists']: results.append(cls.parse(api, obj)) return results def update(self, **kargs): return self._api.update_list(self.slug, **kargs) def destroy(self): return self._api.destroy_list(self.slug) def timeline(self, **kargs): return self._api.list_timeline(self.user.screen_name, self.slug, **kargs) def add_member(self, id): return self._api.add_list_member(self.slug, id) def remove_member(self, id): return self._api.remove_list_member(self.slug, id) def members(self, **kargs): return self._api.list_members(self.user.screen_name, self.slug, **kargs) def is_member(self, id): return self._api.is_list_member(self.user.screen_name, self.slug, id) def subscribe(self): return self._api.subscribe_list(self.user.screen_name, self.slug) def unsubscribe(self): return self._api.unsubscribe_list(self.user.screen_name, self.slug) def subscribers(self, **kargs): return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs) def is_subscribed(self, id): return self._api.is_subscribed_list(self.user.screen_name, self.slug, id) class JSONModel(Model): @classmethod def parse(cls, api, json): lst = JSONModel(api) for k,v in json.items(): setattr(lst, k, v) return lst class IDSModel(Model): @classmethod def parse(cls, api, json): ids = IDSModel(api) for k, v in json.items(): setattr(ids, k, v) return ids class Counts(Model): @classmethod def parse(cls, api, json): ids = Counts(api) for k, v in json.items(): setattr(ids, k, v) return ids class ModelFactory(object): """ Used by parsers for creating instances of models. You may subclass this factory to add your own extended models. """ status = Status comments = Comments user = User direct_message = DirectMessage friendship = Friendship saved_search = SavedSearch search_result = SearchResult list = List json = JSONModel ids_list = IDSModel counts = Counts
[ [ 1, 0, 0.0155, 0.0056, 0, 0.66, 0, 478, 0, 5, 0, 0, 478, 0, 0 ], [ 3, 0, 0.0239, 0.0056, 0, 0.66, 0.0667, 885, 0, 0, 0, 0, 430, 0, 0 ], [ 8, 1, 0.0254, 0.0028, 1, ...
[ "from weibopy.utils import parse_datetime, parse_html_value, parse_a_href, \\\n parse_search_datetime, unescape_html", "class ResultSet(list):\n \"\"\"A list like object that holds results from a Twitter API query.\"\"\"", " \"\"\"A list like object that holds results from a Twitter API query.\"\"\...
""" The MIT License Copyright (c) 2007 Leah Culver Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import cgi import urllib import time import random import urlparse import hmac import binascii VERSION = '1.0' # Hi Blaine! 
HTTP_METHOD = 'GET' SIGNATURE_METHOD = 'PLAINTEXT' class OAuthError(RuntimeError): """Generic exception class.""" def __init__(self, message='OAuth error occured.'): self.message = message def build_authenticate_header(realm=''): """Optional WWW-Authenticate header (401 error)""" return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} def escape(s): """Escape a URL including any /.""" return urllib.quote(s, safe='~') def _utf8_str(s): """Convert unicode to utf-8.""" if isinstance(s, unicode): return s.encode("utf-8") else: return str(s) def generate_timestamp(): """Get seconds since epoch (UTC).""" return int(time.time()) def generate_nonce(length=8): """Generate pseudorandom number.""" return ''.join([str(random.randint(0, 9)) for i in range(length)]) def generate_verifier(length=8): """Generate pseudorandom number.""" return ''.join([str(random.randint(0, 9)) for i in range(length)]) class OAuthConsumer(object): """Consumer of OAuth authentication. OAuthConsumer is a data type that represents the identity of the Consumer via its shared secret with the Service Provider. """ key = None secret = None def __init__(self, key, secret): self.key = key self.secret = secret class OAuthToken(object): """OAuthToken is a data type that represents an End User via either an access or request token. key -- the token secret -- the token secret """ key = None secret = None callback = None callback_confirmed = None verifier = None def __init__(self, key, secret): self.key = key self.secret = secret def set_callback(self, callback): self.callback = callback self.callback_confirmed = 'true' def set_verifier(self, verifier=None): if verifier is not None: self.verifier = verifier else: self.verifier = generate_verifier() def get_callback_url(self): if self.callback and self.verifier: # Append the oauth_verifier. 
parts = urlparse.urlparse(self.callback) scheme, netloc, path, params, query, fragment = parts[:6] if query: query = '%s&oauth_verifier=%s' % (query, self.verifier) else: query = 'oauth_verifier=%s' % self.verifier return urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) return self.callback def to_string(self): data = { 'oauth_token': self.key, 'oauth_token_secret': self.secret, } if self.callback_confirmed is not None: data['oauth_callback_confirmed'] = self.callback_confirmed return urllib.urlencode(data) def from_string(s): """ Returns a token from something like: oauth_token_secret=xxx&oauth_token=xxx """ params = cgi.parse_qs(s, keep_blank_values=False) key = params['oauth_token'][0] secret = params['oauth_token_secret'][0] token = OAuthToken(key, secret) try: token.callback_confirmed = params['oauth_callback_confirmed'][0] except KeyError: pass # 1.0, no callback confirmed. return token from_string = staticmethod(from_string) def __str__(self): return self.to_string() class OAuthRequest(object): """OAuthRequest represents the request and can be serialized. OAuth parameters: - oauth_consumer_key - oauth_token - oauth_signature_method - oauth_signature - oauth_timestamp - oauth_nonce - oauth_version - oauth_verifier ... any additional parameters, as defined by the Service Provider. """ parameters = None # OAuth parameters. 
http_method = HTTP_METHOD http_url = None version = VERSION def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None): self.http_method = http_method self.http_url = http_url self.parameters = parameters or {} def set_parameter(self, parameter, value): self.parameters[parameter] = value def get_parameter(self, parameter): try: return self.parameters[parameter] except: raise OAuthError('Parameter not found: %s' % parameter) def _get_timestamp_nonce(self): return self.get_parameter('oauth_timestamp'), self.get_parameter( 'oauth_nonce') def get_nonoauth_parameters(self): """Get any non-OAuth parameters.""" parameters = {} for k, v in self.parameters.iteritems(): # Ignore oauth parameters. if k.find('oauth_') < 0: parameters[k] = v return parameters def to_header(self, realm=''): """Serialize as a header for an HTTPAuth request.""" auth_header = 'OAuth realm="%s"' % realm # Add the oauth parameters. if self.parameters: for k, v in self.parameters.iteritems(): if k[:6] == 'oauth_': auth_header += ', %s="%s"' % (k, escape(str(v))) return {'Authorization': auth_header} def to_postdata(self): """Serialize as post data for a POST request.""" return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \ for k, v in self.parameters.iteritems()]) def to_url(self): """Serialize as a URL for a GET request.""" return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata()) def get_normalized_parameters(self): """Return a string that contains the parameters that must be signed.""" params = self.parameters try: # Exclude the signature if it exists. del params['oauth_signature'] except: pass # Escape key values before sorting. key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \ for k,v in params.items()] # Sort lexicographically, first after key, then after value. key_values.sort() # Combine key value pairs into a string. 
return '&'.join(['%s=%s' % (k, v) for k, v in key_values]) def get_normalized_http_method(self): """Uppercases the http method.""" return self.http_method.upper() def get_normalized_http_url(self): """Parses the URL and rebuilds it to be scheme://host/path.""" parts = urlparse.urlparse(self.http_url) scheme, netloc, path = parts[:3] # Exclude default port numbers. if scheme == 'http' and netloc[-3:] == ':80': netloc = netloc[:-3] elif scheme == 'https' and netloc[-4:] == ':443': netloc = netloc[:-4] return '%s://%s%s' % (scheme, netloc, path) def sign_request(self, signature_method, consumer, token): """Set the signature parameter to the result of build_signature.""" # Set the signature method. self.set_parameter('oauth_signature_method', signature_method.get_name()) # Set the signature. self.set_parameter('oauth_signature',self.build_signature(signature_method, consumer, token)) def build_signature(self, signature_method, consumer, token): """Calls the build signature method within the signature method.""" return signature_method.build_signature(self, consumer, token) def from_request(http_method, http_url, headers=None, parameters=None, query_string=None): """Combines multiple parameter sources.""" if parameters is None: parameters = {} # Headers if headers and 'Authorization' in headers: auth_header = headers['Authorization'] # Check that the authorization header is OAuth. if auth_header[:6] == 'OAuth ': auth_header = auth_header[6:] try: # Get the parameters from the header. header_params = OAuthRequest._split_header(auth_header) parameters.update(header_params) except: raise OAuthError('Unable to parse OAuth parameters from ' 'Authorization header.') # GET or POST query string. if query_string: query_params = OAuthRequest._split_url_string(query_string) parameters.update(query_params) # URL parameters. 
param_str = urlparse.urlparse(http_url)[4] # query url_params = OAuthRequest._split_url_string(param_str) parameters.update(url_params) if parameters: return OAuthRequest(http_method, http_url, parameters) return None from_request = staticmethod(from_request) def from_consumer_and_token(oauth_consumer, token=None, callback=None, verifier=None, http_method=HTTP_METHOD, http_url=None, parameters=None): if not parameters: parameters = {} defaults = { 'oauth_consumer_key': oauth_consumer.key, 'oauth_timestamp': generate_timestamp(), 'oauth_nonce': generate_nonce(), 'oauth_version': OAuthRequest.version, } defaults.update(parameters) parameters = defaults if token: parameters['oauth_token'] = token.key if token.callback: parameters['oauth_callback'] = token.callback # 1.0a support for verifier. if verifier: parameters['oauth_verifier'] = verifier elif callback: # 1.0a support for callback in the request token request. parameters['oauth_callback'] = callback return OAuthRequest(http_method, http_url, parameters) from_consumer_and_token = staticmethod(from_consumer_and_token) def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None): if not parameters: parameters = {} parameters['oauth_token'] = token.key if callback: parameters['oauth_callback'] = callback return OAuthRequest(http_method, http_url, parameters) from_token_and_callback = staticmethod(from_token_and_callback) def _split_header(header): """Turn Authorization: header into parameters.""" params = {} parts = header.split(',') for param in parts: # Ignore realm parameter. if param.find('realm') > -1: continue # Remove whitespace. param = param.strip() # Split key-value. param_parts = param.split('=', 1) # Remove quotes and unescape the value. 
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"')) return params _split_header = staticmethod(_split_header) def _split_url_string(param_str): """Turn URL string into parameters.""" parameters = cgi.parse_qs(param_str, keep_blank_values=False) for k, v in parameters.iteritems(): parameters[k] = urllib.unquote(v[0]) return parameters _split_url_string = staticmethod(_split_url_string) class OAuthServer(object): """A worker to check the validity of a request against a data store.""" timestamp_threshold = 300 # In seconds, five minutes. version = VERSION signature_methods = None data_store = None def __init__(self, data_store=None, signature_methods=None): self.data_store = data_store self.signature_methods = signature_methods or {} def set_data_store(self, data_store): self.data_store = data_store def get_data_store(self): return self.data_store def add_signature_method(self, signature_method): self.signature_methods[signature_method.get_name()] = signature_method return self.signature_methods def fetch_request_token(self, oauth_request): """Processes a request_token request and returns the request token on success. """ try: # Get the request token for authorization. token = self._get_token(oauth_request, 'request') except OAuthError: # No token required for the initial token request. version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) try: callback = self.get_callback(oauth_request) except OAuthError: callback = None # 1.0, no callback specified. self._check_signature(oauth_request, consumer, None) # Fetch a new token. token = self.data_store.fetch_request_token(consumer, callback) return token def fetch_access_token(self, oauth_request): """Processes an access_token request and returns the access token on success. """ version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) try: verifier = self._get_verifier(oauth_request) except OAuthError: verifier = None # Get the request token. 
token = self._get_token(oauth_request, 'request') self._check_signature(oauth_request, consumer, token) new_token = self.data_store.fetch_access_token(consumer, token, verifier) return new_token def verify_request(self, oauth_request): """Verifies an api call and checks all the parameters.""" # -> consumer and token version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) # Get the access token. token = self._get_token(oauth_request, 'access') self._check_signature(oauth_request, consumer, token) parameters = oauth_request.get_nonoauth_parameters() return consumer, token, parameters def authorize_token(self, token, user): """Authorize a request token.""" return self.data_store.authorize_request_token(token, user) def get_callback(self, oauth_request): """Get the callback URL.""" return oauth_request.get_parameter('oauth_callback') def build_authenticate_header(self, realm=''): """Optional support for the authenticate header.""" return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} def _get_version(self, oauth_request): """Verify the correct version request for this server.""" try: version = oauth_request.get_parameter('oauth_version') except: version = VERSION if version and version != self.version: raise OAuthError('OAuth version %s not supported.' % str(version)) return version def _get_signature_method(self, oauth_request): """Figure out the signature with some defaults.""" try: signature_method = oauth_request.get_parameter( 'oauth_signature_method') except: signature_method = SIGNATURE_METHOD try: # Get the signature method object. 
signature_method = self.signature_methods[signature_method] except: signature_method_names = ', '.join(self.signature_methods.keys()) raise OAuthError('Signature method %s not supported try one of the ' 'following: %s' % (signature_method, signature_method_names)) return signature_method def _get_consumer(self, oauth_request): consumer_key = oauth_request.get_parameter('oauth_consumer_key') consumer = self.data_store.lookup_consumer(consumer_key) if not consumer: raise OAuthError('Invalid consumer.') return consumer def _get_token(self, oauth_request, token_type='access'): """Try to find the token for the provided request token key.""" token_field = oauth_request.get_parameter('oauth_token') token = self.data_store.lookup_token(token_type, token_field) if not token: raise OAuthError('Invalid %s token: %s' % (token_type, token_field)) return token def _get_verifier(self, oauth_request): return oauth_request.get_parameter('oauth_verifier') def _check_signature(self, oauth_request, consumer, token): timestamp, nonce = oauth_request._get_timestamp_nonce() self._check_timestamp(timestamp) self._check_nonce(consumer, token, nonce) signature_method = self._get_signature_method(oauth_request) try: signature = oauth_request.get_parameter('oauth_signature') except: raise OAuthError('Missing signature.') # Validate the signature. valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature) if not valid_sig: key, base = signature_method.build_signature_base_string( oauth_request, consumer, token) raise OAuthError('Invalid signature. 
Expected signature base ' 'string: %s' % base) built = signature_method.build_signature(oauth_request, consumer, token) def _check_timestamp(self, timestamp): """Verify that timestamp is recentish.""" timestamp = int(timestamp) now = int(time.time()) lapsed = abs(now - timestamp) if lapsed > self.timestamp_threshold: raise OAuthError('Expired timestamp: given %d and now %s has a ' 'greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) def _check_nonce(self, consumer, token, nonce): """Verify that the nonce is uniqueish.""" nonce = self.data_store.lookup_nonce(consumer, token, nonce) if nonce: raise OAuthError('Nonce already used: %s' % str(nonce)) class OAuthClient(object): """OAuthClient is a worker to attempt to execute a request.""" consumer = None token = None def __init__(self, oauth_consumer, oauth_token): self.consumer = oauth_consumer self.token = oauth_token def get_consumer(self): return self.consumer def get_token(self): return self.token def fetch_request_token(self, oauth_request): """-> OAuthToken.""" raise NotImplementedError def fetch_access_token(self, oauth_request): """-> OAuthToken.""" raise NotImplementedError def access_resource(self, oauth_request): """-> Some protected resource.""" raise NotImplementedError class OAuthDataStore(object): """A database abstraction used to lookup consumers and tokens.""" def lookup_consumer(self, key): """-> OAuthConsumer.""" raise NotImplementedError def lookup_token(self, oauth_consumer, token_type, token_token): """-> OAuthToken.""" raise NotImplementedError def lookup_nonce(self, oauth_consumer, oauth_token, nonce): """-> OAuthToken.""" raise NotImplementedError def fetch_request_token(self, oauth_consumer, oauth_callback): """-> OAuthToken.""" raise NotImplementedError def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier): """-> OAuthToken.""" raise NotImplementedError def authorize_request_token(self, oauth_token, user): """-> OAuthToken.""" raise 
NotImplementedError class OAuthSignatureMethod(object): """A strategy class that implements a signature method.""" def get_name(self): """-> str.""" raise NotImplementedError def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token): """-> str key, str raw.""" raise NotImplementedError def build_signature(self, oauth_request, oauth_consumer, oauth_token): """-> str.""" raise NotImplementedError def check_signature(self, oauth_request, consumer, token, signature): built = self.build_signature(oauth_request, consumer, token) return built == signature class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod): def get_name(self): return 'HMAC-SHA1' def build_signature_base_string(self, oauth_request, consumer, token): sig = ( escape(oauth_request.get_normalized_http_method()), escape(oauth_request.get_normalized_http_url()), escape(oauth_request.get_normalized_parameters()), ) key = '%s&' % escape(consumer.secret) if token: key += escape(token.secret) #print "OAuth base string:" + str(sig) raw = '&'.join(sig) return key, raw def build_signature(self, oauth_request, consumer, token): """Builds the base signature string.""" key, raw = self.build_signature_base_string(oauth_request, consumer, token) # HMAC object. try: import hashlib # 2.5 hashed = hmac.new(key, raw, hashlib.sha1) except: import sha # Deprecated hashed = hmac.new(key, raw, sha) # Calculate the digest base 64. return binascii.b2a_base64(hashed.digest())[:-1] class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod): def get_name(self): return 'PLAINTEXT' def build_signature_base_string(self, oauth_request, consumer, token): """Concatenates the consumer key and secret.""" sig = '%s&' % escape(consumer.secret) if token: sig = sig + escape(token.secret) return sig, sig def build_signature(self, oauth_request, consumer, token): key, raw = self.build_signature_base_string(oauth_request, consumer, token) return key
[ [ 8, 0, 0.0183, 0.0352, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0382, 0.0015, 0, 0.66, 0.0385, 934, 0, 1, 0, 0, 934, 0, 0 ], [ 1, 0, 0.0398, 0.0015, 0, 0.66...
[ "\"\"\"\nThe MIT License\n\nCopyright (c) 2007 Leah Culver\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights", "import cgi", ...
# Copyright 2009-2010 Joshua Roesslein # See LICENSE for details. """ weibo API library """ __version__ = '1.5' __author__ = 'Joshua Roesslein' __license__ = 'MIT' from weibopy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResult, ModelFactory, IDSModel from weibopy.error import WeibopError from weibopy.api import API from weibopy.cache import Cache, MemoryCache, FileCache from weibopy.auth import BasicAuthHandler, OAuthHandler from weibopy.streaming import Stream, StreamListener from weibopy.cursor import Cursor # Global, unauthenticated instance of API api = API() def debug(enable=True, level=1): import httplib httplib.HTTPConnection.debuglevel = level
[ [ 8, 0, 0.2222, 0.1111, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2963, 0.037, 0, 0.66, 0.0833, 162, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.3333, 0.037, 0, 0.66, ...
[ "\"\"\"\nweibo API library\n\"\"\"", "__version__ = '1.5'", "__author__ = 'Joshua Roesslein'", "__license__ = 'MIT'", "from weibopy.models import Status, User, DirectMessage, Friendship, SavedSearch, SearchResult, ModelFactory, IDSModel", "from weibopy.error import WeibopError", "from weibopy.api import...