code
stringlengths
1
1.49M
vector
listlengths
0
7.38k
snippet
listlengths
0
7.38k
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from google.appengine.ext import webapp

from notifiy import constants


class Home(webapp.RequestHandler):
    """Request handler for the application root.

    The robot has no landing page of its own, so visitors are simply
    forwarded to the project home page.
    """

    def get(self):
        # Unconditional redirect; nothing is rendered locally.
        self.redirect(constants.ROBOT_HOME_PAGE)
[ [ 1, 0, 0.3636, 0.0909, 0, 0.66, 0, 167, 0, 1, 0, 0, 167, 0, 0 ], [ 1, 0, 0.5455, 0.0909, 0, 0.66, 0.5, 36, 0, 1, 0, 0, 36, 0, 0 ], [ 3, 0, 0.9091, 0.2727, 0, 0.66,...
[ "from google.appengine.ext import webapp", "from notifiy import constants", "class Home(webapp.RequestHandler):\n def get(self):\n self.redirect(constants.ROBOT_HOME_PAGE)", " def get(self):\n self.redirect(constants.ROBOT_HOME_PAGE)", " self.redirect(constants.ROBOT_HOME_PAGE)" ]
# -*- coding: UTF-8 -*-
"""Miscellaneous helpers: wave URLs, URL-safe base64, email-body cleanup
and wavelet reply plumbing."""

import base64
import urllib


def get_url(participant, wave_id):
    """Return the web URL of a wave as seen by *participant*.

    The wave id is quoted twice because the wave client unquotes the URL
    fragment once before resolving it.  Returns '' when wave_id is empty.
    """
    domain = participant.split('@')[1]
    if wave_id:
        wave_id = urllib.quote(urllib.quote(wave_id))

    if wave_id and domain == 'googlewave.com':
        return 'https://wave.google.com/wave/#restored:wave:%s' % wave_id
    elif wave_id:
        # BUG FIX: the domain belongs in the '/a/%s/' path segment and the
        # wave id in the fragment; the original interpolated them swapped.
        return 'https://wave.google.com/a/%s/#restored:wave:%s' % (domain, wave_id)
    else:
        return ''


def modified_b64encode(s):
    """URL-safe base64 without '=' padding; accepts unicode or byte strings."""
    if isinstance(s, unicode):
        # BUG FIX: unicode input must be *encoded* to UTF-8 bytes before
        # base64; the original called .decode() on a unicode object, which
        # raised UnicodeDecodeError for any non-ASCII input.
        s = s.encode('UTF-8')
    return base64.urlsafe_b64encode(s).replace('=', '')


def modified_b64decode(s):
    """Inverse of modified_b64encode: re-pad, base64-decode, return unicode."""
    while len(s) % 4 != 0:
        s = s + '='
    # BUG FIX: the decoded payload is UTF-8 bytes and must be *decoded*
    # (the original called .encode, which broke on non-ASCII content).
    return base64.urlsafe_b64decode(s).decode('UTF-8')


def process_body(body):
    """Strip quoted-reply material from an email body.

    Lines accumulated in the current paragraph are discarded whenever a
    quoted line (leading '>') is seen; blank lines flush the buffer into
    the output, so unquoted paragraphs survive intact.
    """
    new_body = []
    content_buffer = []
    for line in body.split('\n'):
        if not line:
            new_body = new_body + content_buffer + [ line ]
            content_buffer = []
        elif line.strip().startswith('>'):
            # BUG FIX: was line.strip()[0] == '>', which raised IndexError
            # on whitespace-only lines; startswith('') is simply False.
            content_buffer = []
        else:
            content_buffer.append(line)
    new_body = new_body + content_buffer
    return '\n'.join(new_body).strip()


def fetch_wavelet(wave_id, wavelet_id, participant):
    """Fetch a wavelet with a robot bound to the participant's domain."""
    from notifiy.robot import create_robot
    robot = create_robot(run=False, domain=participant.split('@')[1])
    # TODO return robot.fetch_wavelet(wave_id, wavelet_id, participant)
    return robot.fetch_wavelet(wave_id, wavelet_id)


def reply_wavelet(wave_id, wavelet_id, blip_id, participant, message):
    """Append *message* to a wavelet as a reply attributed to *participant*."""
    wavelet = fetch_wavelet(wave_id, wavelet_id, participant)
    body = '%s: %s' % (participant, message)  # TODO remove when proxy_for works
    if blip_id in wavelet.blips:
        blip = wavelet.blips[blip_id]
        blip = blip.reply()
        blip.append(body)
    else:
        blip = wavelet.reply(body)
    wavelet.robot.submit(wavelet)

    from notifiy import notifications
    notifications.notify_submitted(wavelet, blip, participant, message)
[ [ 1, 0, 0.0411, 0.0137, 0, 0.66, 0, 177, 0, 1, 0, 0, 177, 0, 0 ], [ 1, 0, 0.0548, 0.0137, 0, 0.66, 0.1429, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 2, 0, 0.1644, 0.1507, 0, ...
[ "import base64", "import urllib", "def get_url(participant, wave_id):\n domain = participant.split('@')[1]\n if wave_id:\n wave_id = urllib.quote(urllib.quote(wave_id))\n\n if wave_id and domain == 'googlewave.com':\n return 'https://wave.google.com/wave/#restored:wave:%s' % wave_id\n ...
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""AJAX endpoint hit by the gadget: /process/<action>?participant=&wave_id=."""

import urllib
import datetime

from google.appengine.ext import webapp
from google.appengine.ext import deferred

from waveapi import simplejson

from notifiy import model
from notifiy import general
from notifiy import preferences
from notifiy.robot import create_robot


class Process(webapp.RequestHandler):
    """Dispatches /process/<action> to the method of the same name."""

    def get(self):
        self.response.contentType = 'application/json'
        path = [urllib.unquote(a) for a in self.request.path.split('/')[2:]]
        notification_type = path[0]
        # Unknown actions fall through silently with an empty response.
        if not hasattr(self, notification_type):
            return
        self.participant = self.request.get('participant')
        self.wave_id = self.request.get('wave_id')
        getattr(self, notification_type)()

    def status(self):
        """Report the current notification state without changing it."""
        self.toggle(False)

    def toggle(self, toggle=True):
        """Cycle (or just report, when toggle=False) the notify level."""
        pp = model.ParticipantPreferences.get_by_pk(self.participant)
        pwp = model.ParticipantWavePreferences.get_by_pk(
                self.participant, self.wave_id, create=toggle)
        data = ''
        if pwp:
            if toggle:
                pwp.notify_type = (pwp.notify_type + 1) % model.NOTIFY_TYPE_COUNT
                pwp.put()
            status = pwp.notify_type
            email = pwp.notify_type
            phones = [ 1 ]  # TODO count phones
            if len(phones) == 0:
                phone = -1
            if pwp.notify_type != model.NOTIFY_NONE:
                phone = model.NOTIFY_ONCE
            else:
                phone = model.NOTIFY_NONE
            data = simplejson.dumps({ 'status': status, 'email': email,
                    'phone': phone,
                    'preferencesWaveId': pp and pp.preferences_wave_id or '' })
        else:
            data = simplejson.dumps({ 'status': 0, 'email': 0, 'phone': 0,
                    'preferencesWaveId': pp and pp.preferences_wave_id or '' })
        self.response.out.write(data)

    def offline(self):
        """Mark the wave visited immediately (user is closing it)."""
        self.online(False)

    def online(self, online=True):
        """Record a wave visit; deferred confirmation while still online."""
        pwp = model.ParticipantWavePreferences.get_by_pk(self.participant,
                self.wave_id)
        if pwp:
            pwp.last_visited = datetime.datetime.now()
            pwp.put()
            if not online:
                # BUG FIX: the original passed the pwp entity itself as the
                # last_visited argument, so the timestamp comparison inside
                # visited() could never succeed and the wave was never
                # marked visited on the offline path.
                visited(pwp.participant, self.wave_id, pwp.last_visited)
            else:
                deferred.defer(visited, pwp.participant, pwp.wave_id,
                        pwp.last_visited, _queue='visited', _countdown=150)
        self.response.out.write(simplejson.dumps({ 'status': 0 }))

    def reset(self):
        """Recreate the participant's preferences wave."""
        domain = self.participant.split('@')[1]
        robot = create_robot(run=False, domain=domain)
        preferences.create_preferences_wave(robot, self.participant)
        #wavelet = robot.fetch_wavelet(self.wave_id, '%s!root+conv' % domain)
        #general.participant_init(wavelet, self.participant)
        #general.participant_wavelet_init(wavelet, self.participant, self.participant)
        self.response.out.write(simplejson.dumps({ 'status': 0 }))

    def confirm(self):
        # TODO: reads the parameters but performs no action yet.
        email = self.request.get('email')
        activation = self.request.get('activation')


def visited(participant, wave_id=None, last_visited=None):
    """Mark a wave visited iff its timestamp still matches *last_visited*.

    The timestamp check makes the deferred task a no-op when the user came
    back (and refreshed last_visited) in the meantime.
    """
    if not wave_id:
        return
    pwp = model.ParticipantWavePreferences.get_by_pk(participant, wave_id)
    # Robustness: the entity may have been deleted before the task ran.
    if pwp and pwp.last_visited == last_visited:
        pwp.visited = True
        pwp.put()
[ [ 1, 0, 0.0381, 0.0095, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0476, 0.0095, 0, 0.66, 0.1, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 1, 0, 0.0667, 0.0095, 0, 0.6...
[ "import urllib", "import datetime", "from google.appengine.ext import webapp", "from google.appengine.ext import deferred", "from waveapi import simplejson", "from notifiy import model", "from notifiy import general", "from notifiy import preferences", "from notifiy.robot import create_robot", "cl...
# -*- coding: UTF-8 -*-
"""Application-wide constants for the Notifiy robot."""

# Wave address of the robot's owner.
ME = 'cesar.izurieta@googlewave.com'

ROBOT_NAME = 'notifiy'
ROBOT_ID = 'wave-email-notifications'

# Addresses and URLs derived from the App Engine application id.
ROBOT_ADDRESS = '%s@appspot.com' % ROBOT_ID
ROBOT_BASE_URL = 'http://%s.appspot.com' % ROBOT_ID
ROBOT_EMAIL = '%s@ecuarock.net' % ROBOT_ID
ROBOT_HOME_PAGE = 'http://%s.googlecode.com' % ROBOT_ID
ROBOT_IMG = '%s/%s' % (ROBOT_BASE_URL, 'favicon.png')
ROBOT_LOGO = '%s/%s' % (ROBOT_BASE_URL, 'logo.png')

# OAuth RPC endpoint per wave domain.
RPC_URL = {
    'googlewave.com': 'http://gmodules.com/api/rpc',
    'wavesandbox.com': 'http://sandbox.gmodules.com/api/rpc',
}
[ [ 14, 0, 0.1765, 0.0588, 0, 0.66, 0, 917, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.2941, 0.0588, 0, 0.66, 0.1111, 987, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.3529, 0.0588, 0, 0...
[ "ME = 'cesar.izurieta@googlewave.com'", "ROBOT_NAME = 'notifiy'", "ROBOT_ID = 'wave-email-notifications'", "ROBOT_ADDRESS = '%s@appspot.com' % ROBOT_ID", "ROBOT_BASE_URL = 'http://%s.appspot.com' % ROBOT_ID", "ROBOT_EMAIL = '%s@ecuarock.net' % ROBOT_ID", "ROBOT_HOME_PAGE = 'http://%s.googlecode.com' % R...
# -*- coding: UTF-8 -*-
"""Robot definition and event wiring for the Notifiy wave robot."""

import logging

from waveapi import appengine_robot_runner
from waveapi import events
from waveapi.robot import Robot

from notifiy import constants
from notifiy import model
from notifiy import notifications
from notifiy import preferences
from notifiy import templates
from notifiy import general


###################################################
# General handlers
###################################################

def on_wavelet_self_added(event, wavelet):
    """Robot added to a wave: initialize it for the adding participant."""
    if preferences.is_preferences_wave(wavelet):
        return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    general.wavelet_init(wavelet, event.modified_by)


def on_wavelet_self_removed(event, wavelet):
    """Robot removed from a wave: tear down its per-wave state."""
    if preferences.is_preferences_wave(wavelet):
        return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    general.wavelet_deinit(wavelet)


def on_wavelet_participants_changed(event, wavelet):
    """Initialize newly added participants and notify them."""
    if preferences.is_preferences_wave(wavelet):
        return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    if wavelet.root_blip and event.blip_id == wavelet.root_blip.blip_id:
        general.wavelet_init(wavelet, event.modified_by)
    message = templates.ADDED_MESSAGE % event.modified_by
    for participant in event.participants_added:
        general.participant_wavelet_init(wavelet, participant,
                event.modified_by, message)


###################################################
# Content change handlers
###################################################

def on_blip_submitted(event, wavelet):
    """Fan out notifications for a newly submitted blip."""
    if preferences.is_preferences_wave(wavelet):
        return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    if wavelet.root_blip and event.blip_id == wavelet.root_blip.blip_id:
        general.wavelet_init(wavelet, event.modified_by)
    notifications.notify_submitted(wavelet, event.blip, event.modified_by)


def on_wavelet_blip_removed(event, wavelet):
    """Fan out notifications when a blip is removed."""
    if preferences.is_preferences_wave(wavelet):
        return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    if wavelet.root_blip and event.blip_id == wavelet.root_blip.blip_id:
        general.wavelet_init(wavelet, event.modified_by)
    notifications.notify_removed(wavelet, event.modified_by)


###################################################
# Preferences handlers
###################################################

def on_form_button_clicked(event, wavelet):
    """Handle button clicks, but only inside preferences waves."""
    if not preferences.is_preferences_wave(wavelet):
        return
    logging.info('%s called', event.type)
    setup_oauth(wavelet.robot, wavelet.domain)
    preferences.handle_event(event, wavelet)


###################################################
# Main functions
###################################################

def create_robot(run=True, domain=None):
    """Build the Robot, register all handlers and optionally run it."""
    robot = Robot(constants.ROBOT_NAME.title(),
                  image_url=constants.ROBOT_IMG,
                  profile_url=constants.ROBOT_BASE_URL)

    # Table-driven registration: (event class, handler, context list).
    wiring = [
        (events.WaveletSelfAdded, on_wavelet_self_added,
                [ events.Context.ROOT ]),
        (events.WaveletSelfRemoved, on_wavelet_self_removed,
                [ events.Context.ROOT ]),
        (events.WaveletParticipantsChanged, on_wavelet_participants_changed,
                [ events.Context.ROOT ]),
        (events.BlipSubmitted, on_blip_submitted,
                [ events.Context.SELF ]),
        (events.WaveletBlipRemoved, on_wavelet_blip_removed,
                [ events.Context.SELF ]),
        (events.FormButtonClicked, on_form_button_clicked,
                [ events.Context.ALL ]),
    ]
    for event_class, handler, context in wiring:
        robot.register_handler(event_class, handler, context=context)

    # Needed to reauthenticate robot
    # verification_token = model.ApplicationSettings.get("verification-token")
    # security_token = model.ApplicationSettings.get("security-token")
    # robot.set_verification_token_info(verification_token, security_token)

    if domain:
        setup_oauth(robot, domain)
    if run:
        appengine_robot_runner.run(robot)
    return robot


def setup_oauth(robot, domain):
    """Configure OAuth using stored credentials and the domain's RPC URL."""
    consumer_key = model.ApplicationSettings.get("consumer-key")
    consumer_secret = model.ApplicationSettings.get("consumer-secret")
    # Unknown domains fall back to the public googlewave.com endpoint. # TODO
    url = constants.RPC_URL.get(domain, constants.RPC_URL['googlewave.com'])
    robot.setup_oauth(consumer_key, consumer_secret, url)
[ [ 1, 0, 0.0234, 0.0078, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0391, 0.0078, 0, 0.66, 0.0588, 326, 0, 1, 0, 0, 326, 0, 0 ], [ 1, 0, 0.0469, 0.0078, 0, ...
[ "import logging", "from waveapi import appengine_robot_runner", "from waveapi import events", "from waveapi.robot import Robot", "from notifiy import constants", "from notifiy import model", "from notifiy import notifications", "from notifiy import preferences", "from notifiy import templates", "f...
# -*- coding: UTF-8 -*-
"""Helpers for the Notifiy gadget embedded in a wave's root blip."""

import logging

from waveapi.element import Gadget

from notifiy import constants
from notifiy import model
from notifiy import preferences

GADGET_URL = '%s/%s.xml' % (constants.ROBOT_BASE_URL, constants.ROBOT_ID)


def is_gadget_present(wavelet):
    """True when the Notifiy gadget is already in the root blip."""
    return bool(wavelet.root_blip.first(Gadget, url=GADGET_URL))


def gadget_add(wavelet):
    """Insert the gadget near the top of the root blip if absent."""
    if is_gadget_present(wavelet):
        return
    try:
        wavelet.root_blip.at(1).insert(Gadget(GADGET_URL))
    except IndexError:
        logging.warn('Could not insert gadget!')


def gadget_remove(wavelet):
    """Delete the gadget from the root blip when present."""
    if is_gadget_present(wavelet):
        # NOTE(review): other call sites pass an element class to .all()
        # (e.g. all(Gadget)); passing the URL string here looks suspect —
        # verify against the waveapi blip refs API.
        wavelet.root_blip.all(GADGET_URL).delete()


def handle_state_change(event, wavelet):
    """React to a gadget state change by resetting wave preferences."""
    if wavelet.root_blip.blip_id != event.blip_id:
        return
    if wavelet.root_blip.all(Gadget)[event.index].url != GADGET_URL:
        return
    pp = model.ParticipantPreferences.get_by_pk(event.modified_by)
    preferences_wavelet = preferences.fetch_preferences_wavelet(
            wavelet, pp.preferences_wave_id, None)
    eh = preferences.ExecHandler(event, preferences_wavelet)
    eh.reset()
[ [ 1, 0, 0.0789, 0.0263, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.1316, 0.0263, 0, 0.66, 0.1111, 665, 0, 1, 0, 0, 665, 0, 0 ], [ 1, 0, 0.1842, 0.0263, 0, ...
[ "import logging", "from waveapi.element import Gadget", "from notifiy import constants", "from notifiy import model", "from notifiy import preferences", "GADGET_URL = '%s/%s.xml' % (constants.ROBOT_BASE_URL, constants.ROBOT_ID)", "def is_gadget_present(wavelet):\n return bool(wavelet.root_blip.first(...
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Datastore models for the Notifiy robot.

All models derive from MigratingModel, which appears to provide primary-key
lookup (get_by_pk) and lazy schema migration via numbered migrate_N methods
run up to migration_version — confirm against migrationmodel.py.
"""

import random

from google.appengine.ext import db
# TODO from google.appengine.api import memcache

from migrationmodel import MigratingModel, get_by_pk

# Per-wave notification levels; the gadget toggle cycles through them
# modulo NOTIFY_TYPE_COUNT.
NOTIFY_NONE = 0
NOTIFY_ONCE = 1
NOTIFY_ALL = 2
NOTIFY_TYPE_COUNT = 3


class Phone(MigratingModel):
    """A phone registered for notifications, keyed by type + device uid."""

    migration_version = 1

    phone_type = db.StringProperty(required=True)
    phone_uid = db.StringProperty(required=True)
    phone_token = db.StringProperty()
    account_id = db.StringProperty()

    pk = ['phone_type', 'phone_uid']


class Account(MigratingModel):
    """A subscription account (store receipt data) for phone notifications."""

    migration_version = 1

    account_id = db.StringProperty(required=True)
    to_date = db.DateTimeProperty(default=None)
    subscription_type = db.StringProperty()
    expiration_date = db.DateProperty()
    transaction_id = db.StringProperty()
    receipt_data = db.TextProperty()

    pk = ['account_id', 'to_date']


class ParticipantPreferences(MigratingModel):
    """Global (per-participant) notification preferences."""

    migration_version = 3

    participant = db.StringProperty(required=True)
    notify = db.BooleanProperty(default=True)
    notify_initial = db.BooleanProperty(default=True)
    email = db.StringProperty()
    activation = db.StringProperty()
    preferences_wave_id = db.StringProperty()
    account_id = db.StringProperty()

    pk = ['participant']

    preferencesWaveId = db.StringProperty(default=None) # Deprecated use preferences_wave_id

    def __init__(self, *args, **kwds):
        # NOTE(review): a fresh activation code is assigned before the db
        # constructor runs; presumably stored values passed in kwds still
        # win on entity load, so only new entities keep the random code —
        # confirm against db.Model.__init__ semantics.
        self.activation = random_activation()
        super(ParticipantPreferences, self).__init__(*args, **kwds)

    def put(self, *args, **kwds):
        # TODO memcache.set(self.get_key(), self, namespace='pp')
        super(ParticipantPreferences, self).put(*args, **kwds)

    def migrate_1(self):
        # v1: backfill notify_initial for entities created before the field.
        if self.notify_initial == None:
            self.notify_initial = True

    def migrate_2(self):
        # v2: ensure every participant has a phone activation code.
        if self.activation == None:
            self.activation = random_activation()

    def migrate_3(self):
        # v3: copy the deprecated camelCase field into preferences_wave_id.
        if self.preferencesWaveId:
            self.preferences_wave_id = self.preferencesWaveId;


class ParticipantWavePreferences(MigratingModel):
    """Per-participant, per-wave notification state."""

    migration_version = 2

    participant = db.StringProperty(required=True)
    wave_id = db.StringProperty(required=False) # TODO migrate all entities
    notify_type = db.IntegerProperty(default=NOTIFY_NONE)
    visited = db.BooleanProperty(default=False)
    last_visited = db.DateTimeProperty()

    pk = ['participant', 'wave_id']

    waveId = db.StringProperty(default=None) # Deprecated use wave_id
    notify = db.BooleanProperty(default=None) # Deprecated use notify_type

    #def put(self, *args, **kwds): # TODO
    # memcache.set(self.get_key(), self, namespace='pwp')
    # super(ParticipantWavePreferences, self).put(*args, **kwds)

    def migrate_1(self):
        # v1: fold the deprecated boolean notify flag into notify_type.
        if self.notify != None:
            if self.notify:
                self.notify_type = NOTIFY_ALL
            self.notify = None

    def migrate_2(self):
        # v2: copy the deprecated camelCase field into wave_id.
        if self.waveId:
            self.wave_id = self.waveId;

    @classmethod
    def get_by_pk(cls, *args, **kw):
        # Fall back to the legacy waveId field for entities that predate the
        # wave_id rename and have not been migrated yet.
        o = get_by_pk(cls, *args, **kw)
        if not o:
            q = ParticipantWavePreferences.all()
            q.filter('participant =', args[0])
            q.filter('waveId =', args[1])
            o = q.get()
        return o


class ApplicationSettings(MigratingModel):
    """Simple key/value store for application-level settings and secrets."""

    migration_version = 0

    keyname = db.StringProperty(required=True)
    value = db.StringProperty()

    pk = ['keyname']

    @classmethod
    def get(cls, keyname):
        # Raises AttributeError if the key is absent (get_by_pk -> None).
        return cls.get_by_pk(keyname).value


def random_activation():
    # 9-digit numeric activation code, returned as a string.
    return ''.join([str(random.randint(0, 9)) for a in range(9)])
[ [ 1, 0, 0.0303, 0.0076, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0455, 0.0076, 0, 0.66, 0.0833, 167, 0, 1, 0, 0, 167, 0, 0 ], [ 1, 0, 0.0682, 0.0076, 0, ...
[ "import random", "from google.appengine.ext import db", "from migrationmodel import MigratingModel, get_by_pk", "NOTIFY_NONE = 0", "NOTIFY_ONCE = 1", "NOTIFY_ALL = 2", "NOTIFY_TYPE_COUNT = 3", "class Phone(MigratingModel):\n migration_version = 1\n\n phone_type = db.StringProperty(required=True)...
# -*- coding: UTF-8 -*-
"""Creation and maintenance of each participant's preferences wave."""

import logging

from waveapi import element

from notifiy import constants
from notifiy import model
from notifiy import templates

# Data documents used to tag and identify preferences waves.
PARTICIPANT_DATA_DOC = '%s/participant' % constants.ROBOT_ADDRESS
VERSION_DATA_DOC = '%s/preferencesVersion' % constants.ROBOT_ADDRESS
PREFERENCES_VERSION = '14'

SETTIE_ROBOT = 'settie@a.gwave.com'


def is_preferences_wave(wavelet):
    """True when the wavelet carries the preferences version data doc."""
    return VERSION_DATA_DOC in wavelet.data_documents


def find_participant(wavelet, participant=None):
    """Return the wave's owning participant, falling back to *participant*."""
    if PARTICIPANT_DATA_DOC in wavelet.data_documents:
        return wavelet.data_documents[PARTICIPANT_DATA_DOC]
    else:
        return participant


def fetch_preferences_wavelet(wavelet, preferences_wave_id, participant=None):
    """Fetch the preferences wavelet identified by *preferences_wave_id*.

    CONSISTENCY FIX: *participant* is accepted (currently unused) so that
    callers such as gadget.handle_state_change, which pass a third
    argument, no longer raise TypeError.
    """
    prefs_wavelet = wavelet.robot.fetch_wavelet(preferences_wave_id)
    return prefs_wavelet


def create_preferences_wave(robot, participant):
    """Create and populate a brand-new preferences wave for *participant*."""
    domain = participant.split('@')[1]
    participants = [ constants.ROBOT_ADDRESS, SETTIE_ROBOT, participant ]
    prefs_wavelet = robot.new_wave(domain, participants, submit=True)
    update_preferences_wavelet(prefs_wavelet, participant, force=True)
    robot.submit(prefs_wavelet)


def update_preferences_wavelet(wavelet, participant=None, force=False):
    """(Re)build the preferences wave content unless already up to date."""
    if not force and wavelet.data_documents[VERSION_DATA_DOC] == PREFERENCES_VERSION:
        return

    participant = find_participant(wavelet, participant)
    pp = model.ParticipantPreferences.get_by_pk(participant)
    logging.debug('Updating preferences wave content for %s', participant)
    if force:
        pp.preferences_wave_id = wavelet.wave_id
        pp.put()

    content = []
    content += [ element.Image(url=constants.ROBOT_LOGO, width=200, height=100,
            caption=constants.ROBOT_NAME.title()) ]
    content.append('\n')
    content += [ element.Check('notify', pp.notify),
            ' Notify me to this email:\n',
            element.Input('email', str(pp.email)), '\n' ]
    content += [ element.Check('notify_initial', pp.notify_initial),
            ' Send initial notifications', '\n' ]
    content.append('\n')
    content += [ 'Phone activation code: %s\n' % pp.activation ]
    if pp.account_id:
        content += [ 'Phone account id: %s\n' % pp.account_id ]
        query = model.Phone.all()
        query.filter('account_id =', pp.account_id)
        content += [ 'Phones associated with this account: %s\n' % len(list(query)) ]
        query = model.ParticipantPreferences.all()
        query.filter('account_id =', pp.account_id)
        content += [ 'Google wave accounts associated: ' ]
        content += [ ','.join([ '%s' % pp2.participant for pp2 in query ]) ]
        content.append('\n')
    content.append('\n')
    content += [ element.Button('save_pp', 'Save'), ' ',
            element.Button('refresh_pp', 'Refresh'), '\n' ]
    content.append('\n')
    content += [ 'Execute global commands: (try "help")',
            element.Input('command', ''), element.Button('exec_pp', 'Exec') ]

    wavelet.root_blip.all().delete()
    wavelet.data_documents[PARTICIPANT_DATA_DOC] = participant
    wavelet.data_documents[VERSION_DATA_DOC] = PREFERENCES_VERSION
    wavelet.title = 'Notifiy global preferences'
    for c in content:
        wavelet.root_blip.append(c)


def delete_preferences_wavelet(wavelet, participant=None):
    """Blank out a participant's preferences wave and untag it."""
    if not wavelet:
        return
    if not participant:
        participant = find_participant(wavelet)
    pp = model.ParticipantPreferences.get_by_pk(participant)
    if not pp:
        return
    prefs_wavelet = fetch_preferences_wavelet(wavelet, pp.preferences_wave_id)
    prefs_wavelet.title = "Please delete this wave"
    del prefs_wavelet.data_documents[PARTICIPANT_DATA_DOC]
    del prefs_wavelet.data_documents[VERSION_DATA_DOC]
    prefs_wavelet.root_blip.all().delete()
    wavelet.robot.submit(prefs_wavelet)


def handle_event(event, wavelet):
    """Dispatch a form-button click inside a preferences wave."""
    participant = find_participant(wavelet, event.modified_by)
    logging.debug('Preferences if %s == %s' % (participant, event.modified_by))
    # Only the wave's owner may change their own preferences.
    if participant != event.modified_by:
        return

    if event.button_name == 'save_pp':
        pp = model.ParticipantPreferences.get_by_pk(participant)
        for t, f, p in [ (element.Check, bool, 'notify'),
                (element.Input, str, 'email'),
                (element.Check, bool, 'notify_initial') ]:
            form_element = wavelet.root_blip.first(t, name=p).value()
            setattr(pp, p, f(form_element.value))
        # NOTE(review): pp is mutated but pp.put() is never called here —
        # confirm whether persistence happens elsewhere or is missing.
        wavelet.reply(templates.PREFERENCES_SAVED)
    elif event.button_name == 'refresh_pp':
        if ExecHandler(event, wavelet).refresh():
            wavelet.reply(templates.COMMAND_SUCCESSFUL % 'refresh')
        else:
            wavelet.reply(templates.ERROR_TRY_AGAIN)
    elif event.button_name == 'exec_pp':
        eh = ExecHandler(event, wavelet)
        form_element = wavelet.root_blip.first(element.Input,
                name='command').value()
        command = form_element.value.split(' ')
        if hasattr(eh, command[0]):
            result = getattr(eh, command[0])(*command[1:])
            if result == True:
                wavelet.reply(templates.COMMAND_SUCCESSFUL % form_element.value)
            elif result == False:
                wavelet.reply(templates.ERROR_TRY_AGAIN)
            elif result:
                wavelet.reply(result)
        else:
            wavelet.reply(templates.COMMAND_UNKNOWN % command)


class ExecHandler(object):
    """Executes the textual commands typed into the preferences wave."""

    def __init__(self, event, wavelet):
        self.event = event
        self.wavelet = wavelet

    def help(self):
        """Return the command help text."""
        logging.debug('ExecHandler help')
        return templates.COMMANDS_HELP

    def refresh(self):
        """Force-rebuild the preferences wave content."""
        logging.debug('ExecHandler refresh')
        update_preferences_wavelet(self.wavelet, self.event.modified_by,
                force=True)
        return True

    def clean(self):
        """Delete every blip except the root one."""
        logging.debug('ExecHandler clean')
        delete = []
        for blip_id in self.wavelet.blips:
            if blip_id != self.wavelet.root_blip.blip_id:
                delete.append(blip_id)
        for blip_id in delete:
            self.wavelet.delete(blip_id)

    def reset(self):
        logging.debug('ExecHandler reset')
        return "Not implemented yet"

    def regen(self, participant=None):
        """Regenerate the phone activation code and refresh the wave."""
        pp = model.ParticipantPreferences.get_by_pk(
                participant or self.event.modified_by)
        pp.activation = model.random_activation()
        pp.put()
        return self.refresh()

    def recreate(self, participant=None):
        """Delete and recreate the preferences wave from scratch."""
        logging.debug('ExecHandler recreate')
        delete_preferences_wavelet(self.wavelet,
                participant or self.event.modified_by)
        create_preferences_wave(self.wavelet.robot,
                participant or self.event.modified_by)
        return True
[ [ 1, 0, 0.0165, 0.0055, 0, 0.66, 0, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0275, 0.0055, 0, 0.66, 0.0625, 326, 0, 1, 0, 0, 326, 0, 0 ], [ 1, 0, 0.0385, 0.0055, 0, ...
[ "import logging", "from waveapi import element", "from notifiy import constants", "from notifiy import model", "from notifiy import templates", "PARTICIPANT_DATA_DOC = '%s/participant' % constants.ROBOT_ADDRESS", "VERSION_DATA_DOC = '%s/preferencesVersion' % constants.ROBOT_ADDRESS", "PREFERENCES_VERS...
# -*- coding: UTF-8 -*-
"""All user-visible message templates for the Notifiy robot.

Typo fixes in message text: "recive" -> "receive", "supressed" ->
"suppressed", "preferenes" -> "preferences".  Constant names (including
CONTENT_SUPRESSED) are unchanged so callers keep working.
"""

###################################################
# General mail template
###################################################

# Arguments: blip text, wave URL, preferences URL, unsubscribe address.
MESSAGE_TEMPLATE = u'''\
%s

---
Reply to this message to add a blip to the wave
Visit this wave: %s
Change global notification preferences: %s
To unsubscribe please visit your preferences or send an email to: %s
'''

NOTIFY_ONCE_TEMPLATE = u'''\
%s

[NOTE: you will not receive further messages until you visit this wave]
'''

###################################################
# Individual email messages
###################################################

INITIAL_MESSAGE = u'To receive email notifications visit this wave and activate them.'
ROBOT_ADDED = u'The notifiy robot has been added to this wave. '
ADDED_MESSAGE = u'%s added you as a participant to this wave.'
CONTENT_DELETED = u'Some content was deleted from the wave'
CONTENT_SUPRESSED = u'%s... [some content was suppressed]'
PHONE_MESSAGE = '[wave] %s: %s'

###################################################
# Unsubscribed messages
###################################################

UNSUBSCRIBED_SUBJECT = u'Unsubscribed'
UNSUBSCRIBED_BODY = u'Your email has been unsubscribed from the Notifiy robot. \
To receive notifications again please visit Google Wave and update your preferences. \
Your email may still show there, just click the refresh button.'

###################################################
# Preferences wave messages
###################################################

COMMANDS_HELP = u'''
help: Show this help
refresh: Recreate the preferences wave
clean: Clean all messages in this wave.
regen: Regenerate the activation code.
reset: Reset your specific wave preferences (for all waves) and refresh this form.
'''
COMMAND_SUCCESSFUL = u'Command %s ran successfully'
COMMAND_UNKNOWN = u'Command %s not found'
PREFERENCES_SAVED = u'Preferences saved'
ERROR_TRY_AGAIN = u'There was an error, please try again in a few moments'

###################################################
# Error messages
###################################################

ERROR_BODY = u'''Your message "%s" could not be processed because of the following error: %s

=========================
ORIGINAL MESSAGE FOLLOWS:
=========================

%s
'''
[ [ 14, 0, 0.1341, 0.1098, 0, 0.66, 0, 755, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.2317, 0.061, 0, 0.66, 0.0667, 373, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.3293, 0.0122, 0, 0....
[ "MESSAGE_TEMPLATE = u'''\\\n%s\n\n---\nReply to this message to add a blip to the wave\nVisit this wave: %s\nChange global notification preferences: %s\nTo unsubscribe please visit your preferences or send an email to: %s", "NOTIFY_ONCE_TEMPLATE = u'''\\\n%s\n\n[NOTE: you will not recive further messages until yo...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the element module."""


import base64
import unittest

import element
import util


class TestElement(unittest.TestCase):
  """Tests for the element.Element class."""

  def testProperties(self):
    # Arbitrary keyword properties become attributes of the element.
    el = element.Element(element.Gadget.class_type, key='value')
    self.assertEquals('value', el.key)

  def testFormElement(self):
    el = element.Input('input')
    self.assertEquals(element.Input.class_type, el.type)
    self.assertEquals(el.value, '')
    self.assertEquals(el.name, 'input')

  def testImage(self):
    image = element.Image('http://test.com/image.png', width=100, height=100)
    self.assertEquals(element.Image.class_type, image.type)
    self.assertEquals(image.url, 'http://test.com/image.png')
    self.assertEquals(image.width, 100)
    self.assertEquals(image.height, 100)

  def testAttachment(self):
    attachment = element.Attachment(caption='My Favorite', data='SomefakeData')
    self.assertEquals(element.Attachment.class_type, attachment.type)
    self.assertEquals(attachment.caption, 'My Favorite')
    self.assertEquals(attachment.data, 'SomefakeData')

  def testGadget(self):
    gadget = element.Gadget('http://test.com/gadget.xml')
    self.assertEquals(element.Gadget.class_type, gadget.type)
    self.assertEquals(gadget.url, 'http://test.com/gadget.xml')

  def testInstaller(self):
    installer = element.Installer('http://test.com/installer.xml')
    self.assertEquals(element.Installer.class_type, installer.type)
    self.assertEquals(installer.manifest, 'http://test.com/installer.xml')

  def testSerialize(self):
    image = element.Image('http://test.com/image.png', width=100, height=100)
    s = util.serialize(image)
    k = s.keys()
    k.sort()
    # we should really only have three things to serialize
    props = s['properties']
    self.assertEquals(len(props), 3)
    self.assertEquals(props['url'], 'http://test.com/image.png')
    self.assertEquals(props['width'], 100)
    self.assertEquals(props['height'], 100)

  def testSerializeAttachment(self):
    attachment = element.Attachment(caption='My Favorite', data='SomefakeData')
    s = util.serialize(attachment)
    k = s.keys()
    k.sort()
    # we should really have two things to serialize
    props = s['properties']
    self.assertEquals(len(props), 2)
    self.assertEquals(props['caption'], 'My Favorite')
    # data is transported base64-encoded but stays raw on the object
    self.assertEquals(props['data'], base64.encodestring('SomefakeData'))
    self.assertEquals(attachment.data, 'SomefakeData')

  def testSerializeLine(self):
    line = element.Line(element.Line.TYPE_H1,
                        alignment=element.Line.ALIGN_LEFT)
    s = util.serialize(line)
    k = s.keys()
    k.sort()
    # we should really only have two things to serialize
    props = s['properties']
    self.assertEquals(len(props), 2)
    self.assertEquals(props['alignment'], 'l')
    self.assertEquals(props['lineType'], 'h1')

  def testSerializeGadget(self):
    gadget = element.Gadget('http://test.com',
                            {'prop1': 'a', 'prop_cap': None})
    s = util.serialize(gadget)
    k = s.keys()
    k.sort()
    # we should really only have three things to serialize
    props = s['properties']
    self.assertEquals(len(props), 3)
    self.assertEquals(props['url'], 'http://test.com')
    self.assertEquals(props['prop1'], 'a')
    self.assertEquals(props['prop_cap'], None)

  def testGadgetElementFromJson(self):
    url = 'http://www.foo.com/gadget.xml'
    json = {
        'type': element.Gadget.class_type,
        'properties': {
            'url': url,
        }
    }
    gadget = element.Element.from_json(json)
    self.assertEquals(element.Gadget.class_type, gadget.type)
    self.assertEquals(url, gadget.url)

  def testImageElementFromJson(self):
    url = 'http://www.foo.com/image.png'
    width = '32'
    height = '32'
    attachment_id = '2'
    caption = 'Test Image'
    json = {
        'type': element.Image.class_type,
        'properties': {
            'url': url,
            'width': width,
            'height': height,
            'attachmentId': attachment_id,
            'caption': caption,
        }
    }
    image = element.Element.from_json(json)
    self.assertEquals(element.Image.class_type, image.type)
    self.assertEquals(url, image.url)
    self.assertEquals(width, image.width)
    self.assertEquals(height, image.height)
    self.assertEquals(attachment_id, image.attachmentId)
    self.assertEquals(caption, image.caption)

  def testAttachmentElementFromJson(self):
    caption = 'fake caption'
    data = 'fake data'
    mime_type = 'fake mime'
    attachment_id = 'fake id'
    attachment_url = 'fake URL'
    json = {
        'type': element.Attachment.class_type,
        'properties': {
            'caption': caption,
            'data': data,
            'mimeType': mime_type,
            'attachmentId': attachment_id,
            'attachmentUrl': attachment_url,
        }
    }
    attachment = element.Element.from_json(json)
    self.assertEquals(element.Attachment.class_type, attachment.type)
    self.assertEquals(caption, attachment.caption)
    self.assertEquals(data, attachment.data)
    self.assertEquals(mime_type, attachment.mimeType)
    self.assertEquals(attachment_id, attachment.attachmentId)
    self.assertEquals(attachment_url, attachment.attachmentUrl)

  def testFormElementFromJson(self):
    name = 'button'
    value = 'value'
    default_value = 'foo'
    json = {
        'type': element.Label.class_type,
        'properties': {
            'name': name,
            'value': value,
            'defaultValue': default_value,
        }
    }
    el = element.Element.from_json(json)
    self.assertEquals(element.Label.class_type, el.type)
    self.assertEquals(name, el.name)
    self.assertEquals(value, el.value)

  def testCanInstantiate(self):
    # Construct one of every registered element type and check the set of
    # constructed types matches element.ALL exactly (both directions).
    bag = [element.Check(name='check', value='value'),
           element.Button(name='button', value='caption'),
           element.Input(name='input', value='caption'),
           element.Label(label_for='button', caption='caption'),
           element.RadioButton(name='name', group='group'),
           element.RadioButtonGroup(name='name', value='value'),
           element.Password(name='name', value='geheim'),
           element.TextArea(name='name', value='\n\n\n'),
           element.Installer(manifest='test.com/installer.xml'),
           element.Line(line_type='type', indent='3',
                        alignment='r', direction='d'),
           element.Gadget(url='test.com/gadget.xml',
                         props={'key1': 'val1', 'key2': 'val2'}),
           element.Image(url='test.com/image.png', width=100, height=200),
           element.Attachment(caption='fake caption', data='fake data')]
    types_constructed = set([type(x) for x in bag])
    types_required = set(element.ALL.values())
    missing_required = types_constructed.difference(types_required)
    self.assertEquals(missing_required, set())
    missing_constructed = types_required.difference(types_constructed)
    self.assertEquals(missing_constructed, set())


if __name__ == '__main__':
  unittest.main()
[ [ 8, 0, 0.0791, 0.0047, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.093, 0.0047, 0, 0.66, 0.1667, 177, 0, 1, 0, 0, 177, 0, 0 ], [ 1, 0, 0.0977, 0.0047, 0, 0.66,...
[ "\"\"\"Unit tests for the element module.\"\"\"", "import base64", "import unittest", "import element", "import util", "class TestElement(unittest.TestCase):\n \"\"\"Tests for the element.Element class.\"\"\"\n\n def testProperties(self):\n el = element.Element(element.Gadget.class_type,\n ...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the util module."""

__author__ = 'davidbyttow@google.com (David Byttow)'

import unittest

import ops
import util


class TestUtils(unittest.TestCase):
  """Tests utility functions."""

  def testIsIterable(self):
    # Containers (including empty ones) are iterable; scalars, strings
    # and classes are not.
    for candidate in ([], {}, set(), ()):
      self.assertTrue(util.is_iterable(candidate))
    for candidate in (42, 'list?', object):
      self.assertFalse(util.is_iterable(candidate))

  def testIsDict(self):
    # Only a real dict qualifies; other containers and scalars do not.
    self.assertFalse(util.is_dict([]))
    self.assertTrue(util.is_dict({}))
    for candidate in (set(), (), 42, 'dict?', object):
      self.assertFalse(util.is_dict(candidate))

  def testIsUserDefinedNewStyleClass(self):
    class OldClass:
      pass

    class NewClass(object):
      pass

    self.assertFalse(util.is_user_defined_new_style_class(OldClass()))
    self.assertTrue(util.is_user_defined_new_style_class(NewClass()))
    # Builtin instances are not user-defined classes.
    for candidate in ({}, (), 42, 'instance?'):
      self.assertFalse(util.is_user_defined_new_style_class(candidate))

  def testLowerCamelCase(self):
    # (expected, input) pairs covering plain words, underscores, doubled
    # and trailing underscores, and degenerate inputs.
    cases = (('foo', 'foo'),
             ('fooBar', 'foo_bar'),
             ('fooBar', 'fooBar'),
             ('blipId', 'blip_id'),
             ('fooBar', 'foo__bar'),
             ('fooBarBaz', 'foo_bar_baz'),
             ('f', 'f'),
             ('f', 'f_'),
             ('', ''),
             ('', '_'),
             ('aBCDEF', '_a_b_c_d_e_f_'))
    for expected, raw in cases:
      self.assertEquals(expected, util.lower_camel_case(raw))

  def assertListsEqual(self, a, b):
    """Asserts that both sequences have the same length and elements."""
    self.assertEquals(len(a), len(b))
    for left, right in zip(a, b):
      self.assertEquals(left, right)

  def assertDictsEqual(self, a, b):
    """Asserts that both dicts hold the same keys and values."""
    self.assertEquals(len(a.keys()), len(b.keys()))
    for key, value in a.iteritems():
      self.assertEquals(value, b[key])

  def testSerializeList(self):
    payload = [1, 2, 3]
    self.assertListsEqual(payload, util.serialize(payload))

  def testSerializeDict(self):
    # Keys are camel-cased on serialization; values pass through.
    payload = {'key': 'value', 'under_score': 'value2'}
    camel_cased = {'key': 'value', 'underScore': 'value2'}
    self.assertDictsEqual(camel_cased, util.serialize(payload))

  def testNonNoneDict(self):
    base = {'a': 1, 'b': 1}
    self.assertDictsEqual(base, util.non_none_dict(base))
    # Entries whose value is None are dropped.
    with_none = base.copy()
    with_none['c'] = None
    self.assertDictsEqual(base, util.non_none_dict(with_none))

  def testForceUnicode(self):
    self.assertEquals(u"aaa", util.force_unicode("aaa"))
    self.assertEquals(u"12", util.force_unicode(12))
    self.assertEquals(u"\u0430\u0431\u0432",
                      util.force_unicode("\xd0\xb0\xd0\xb1\xd0\xb2"))
    self.assertEquals(u'\u30e6\u30cb\u30b3\u30fc\u30c9',
                      util.force_unicode(u'\u30e6\u30cb\u30b3\u30fc\u30c9'))

  def testSerializeAttributes(self):
    class Data(object):
      def __init__(self):
        self.public = 1
        self._protected = 2
        self.__private = 3

      def Func(self):
        pass

    instance = Data()
    serialized = util.serialize(instance)
    # Functions and non-public fields should not be serialized.
    self.assertEquals(1, len(serialized.keys()))
    self.assertEquals(instance.public, serialized['public'])

  def testStringEnum(self):
    util.StringEnum()
    one_value = util.StringEnum('foo')
    self.assertEquals('foo', one_value.foo)
    two_values = util.StringEnum('foo', 'bar')
    self.assertEquals('foo', two_values.foo)
    self.assertEquals('bar', two_values.bar)

  def testParseMarkup(self):
    # (expected, markup) pairs: tags are stripped, <br> and <p ...>
    # become newlines.
    cases = (('foo', 'foo'),
             ('foo bar', 'foo <b>bar</b>'),
             ('foo\nbar', 'foo<br>bar'),
             ('foo\nbar', 'foo<p indent="3">bar'))
    for expected, markup in cases:
      self.assertEquals(expected, util.parse_markup(markup))


if __name__ == '__main__':
  unittest.main()
[ [ 8, 0, 0.1172, 0.0069, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1379, 0.0069, 0, 0.66, 0.1667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.1586, 0.0069, 0, 0.66,...
[ "\"\"\"Unit tests for the util module.\"\"\"", "__author__ = 'davidbyttow@google.com (David Byttow)'", "import unittest", "import ops", "import util", "class TestUtils(unittest.TestCase):\n \"\"\"Tests utility functions.\"\"\"\n\n def testIsIterable(self):\n self.assertTrue(util.is_iterable([]))\n ...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Models blips and their annotations, elements and content references."""

import element
import errors
import util


class Annotation(object):
  """Models an annotation on a document.

  Annotations are key/value pairs over a range of content. Annotations
  can be used to store data or to be interpreted by a client when displaying
  the data.
  """

  # Use the following constants to control the display of the client
  #: Reserved annotation for setting background color of text.
  BACKGROUND_COLOR = "style/backgroundColor"
  #: Reserved annotation for setting color of text.
  COLOR = "style/color"
  #: Reserved annotation for setting font family of text.
  FONT_FAMILY = "style/fontFamily"
  #: Reserved annotation for setting font size of text.
  FONT_SIZE = "style/fontSize"
  #: Reserved annotation for setting font style of text.
  FONT_STYLE = "style/fontStyle"
  #: Reserved annotation for setting font weight of text.
  FONT_WEIGHT = "style/fontWeight"
  #: Reserved annotation for setting text decoration.
  TEXT_DECORATION = "style/textDecoration"
  #: Reserved annotation for setting vertical alignment.
  VERTICAL_ALIGN = "style/verticalAlign"

  def __init__(self, name, value, start, end):
    self._name = name
    self._value = value
    self._start = start
    self._end = end

  @property
  def name(self):
    """The name of this annotation."""
    return self._name

  @property
  def value(self):
    """The value of this annotation."""
    return self._value

  @property
  def start(self):
    """The start index of this annotation's range."""
    return self._start

  @property
  def end(self):
    """The end index (exclusive) of this annotation's range."""
    return self._end

  def _shift(self, where, inc):
    """Shift annotation by 'inc' if it (partly) overlaps with 'where'."""
    if self._start >= where:
      self._start += inc
    if self._end >= where:
      self._end += inc

  def serialize(self):
    """Serializes the annotation.

    Returns:
      A dict containing the name, value, and range values.
    """
    return {'name': self._name,
            'value': self._value,
            'range': {'start': self._start,
                      'end': self._end}}


class Annotations(object):
  """A dictionary-like object containing the annotations, keyed by name."""

  def __init__(self, operation_queue, blip):
    self._operation_queue = operation_queue
    self._blip = blip
    self._store = {}

  def __contains__(self, what):
    # Accepts either an Annotation instance or a plain name string.
    if isinstance(what, Annotation):
      what = what.name
    return what in self._store

  def _add_internal(self, name, value, start, end):
    """Internal add annotation does not send out operations."""
    if name in self._store:
      # TODO: use bisect to make this more efficient.
      new_list = []
      for existing in self._store[name]:
        if start > existing.end or end < existing.start:
          # No overlap; keep the existing annotation as-is.
          new_list.append(existing)
        else:
          if existing.value == value:
            # merge the annotations:
            start = min(existing.start, start)
            end = max(existing.end, end)
          else:
            # chop the bits off the existing annotation
            if existing.start < start:
              new_list.append(Annotation(
                  existing.name, existing.value, existing.start, start))
            if existing.end > end:
              # Fixed: the surviving right-hand remainder runs from 'end'
              # to existing.end (the previous code produced a reversed
              # range of (existing.end, end)).
              new_list.append(Annotation(
                  existing.name, existing.value, end, existing.end))
      new_list.append(Annotation(name, value, start, end))
      self._store[name] = new_list
    else:
      self._store[name] = [Annotation(name, value, start, end)]

  def _delete_internal(self, name, start=0, end=-1):
    """Remove the passed annotation from the internal representation."""
    if not name in self._store:
      return
    if end < 0:
      # Negative end counts back from the blip length.
      end = len(self._blip) + end
    new_list = []
    for a in self._store[name]:
      if start > a.end or end < a.start:
        # No overlap with the deleted range; keep unchanged.
        new_list.append(a)
      elif start < a.start and end > a.end:
        # Fully covered by the deleted range; drop it.
        continue
      else:
        # Partial overlap; keep the pieces outside [start, end).
        if a.start < start:
          new_list.append(Annotation(name, a.value, a.start, start))
        if a.end > end:
          new_list.append(Annotation(name, a.value, end, a.end))
    if new_list:
      self._store[name] = new_list
    else:
      del self._store[name]

  def _shift(self, where, inc):
    """Shift annotation by 'inc' if it (partly) overlaps with 'where'."""
    for annotations in self._store.values():
      for annotation in annotations:
        annotation._shift(where, inc)
    # Merge fragmented annotations that should be contiguous, for example:
    # Annotation('foo', 'bar', 1, 2) and Annotation('foo', 'bar', 2, 3).
    # Fixed: the previous implementation deleted elements of the list it
    # was enumerating using indices into a slice copy, which removed the
    # wrong elements. Merge into a fresh list in start order instead.
    for name, annotations in self._store.items():
      new_list = []
      for annotation in sorted(annotations, key=lambda ann: ann.start):
        if (new_list and
            new_list[-1].end == annotation.start and
            new_list[-1].value == annotation.value):
          # Contiguous and same value: extend the previous annotation.
          new_list[-1] = Annotation(name, annotation.value,
                                    new_list[-1].start, annotation.end)
        else:
          new_list.append(annotation)
      self._store[name] = new_list

  def __len__(self):
    return len(self._store)

  def __getitem__(self, key):
    return self._store[key]

  def __iter__(self):
    # Yields every individual Annotation across all names.
    for l in self._store.values():
      for ann in l:
        yield ann

  def names(self):
    """Return the names of the annotations in the store."""
    return self._store.keys()

  def serialize(self):
    """Return a list of the serialized annotations."""
    res = []
    for v in self._store.values():
      res += [a.serialize() for a in v]
    return res


class Blips(object):
  """A dictionary-like object containing the blips, keyed on blip ID."""

  def __init__(self, blips):
    self._blips = blips

  def __getitem__(self, blip_id):
    return self._blips[blip_id]

  def __iter__(self):
    return self._blips.__iter__()

  def __len__(self):
    return len(self._blips)

  def _add(self, ablip):
    """Registers a blip under its own id."""
    self._blips[ablip.blip_id] = ablip

  def _remove_with_id(self, blip_id):
    """Removes a blip and unlinks it from its parent's children."""
    del_blip = self._blips[blip_id]
    if del_blip:
      # Remove the reference to this blip from its parent.
      parent_blip = self._blips[blip_id].parent_blip
      if parent_blip:
        parent_blip._child_blip_ids.remove(blip_id)
    del self._blips[blip_id]

  def get(self, blip_id, default_value=None):
    """Retrieves a blip.

    Returns:
      A Blip object. If none found for the ID, it returns None, or if
      default_value is specified, it returns that.
    """
    return self._blips.get(blip_id, default_value)

  def serialize(self):
    """Serializes the blips.

    Returns:
      A dict of serialized blips.
    """
    res = {}
    for blip_id, item in self._blips.items():
      res[blip_id] = item.serialize()
    return res


class BlipRefs(object):
  """Represents a set of references to contents in a blip.

  For example, a BlipRefs instance can represent the results of
  a search, an explicitly set range, a regular expression, or refer
  to the entire blip. BlipRefs are used to express operations on a blip
  in a consistent way that can easily be transfered to the server.

  The typical way of creating a BlipRefs object is to use selector methods
  on the Blip object. Developers will not usually instantiate a BlipRefs
  object directly.
  """

  DELETE = 'DELETE'
  REPLACE = 'REPLACE'
  INSERT = 'INSERT'
  INSERT_AFTER = 'INSERT_AFTER'
  ANNOTATE = 'ANNOTATE'
  CLEAR_ANNOTATION = 'CLEAR_ANNOTATION'
  UPDATE_ELEMENT = 'UPDATE_ELEMENT'

  def __init__(self, blip, maxres=1):
    self._blip = blip
    self._maxres = maxres

  @classmethod
  def all(cls, blip, findwhat, maxres=-1, **restrictions):
    """Construct an instance representing the search for text or elements."""
    obj = cls(blip, maxres)
    obj._findwhat = findwhat
    obj._restrictions = restrictions
    obj._hits = lambda: obj._find(findwhat, maxres, **restrictions)
    if findwhat is None:
      # No findWhat, take the entire blip
      obj._params = {}
    else:
      query = {'maxRes': maxres}
      if isinstance(findwhat, basestring):
        query['textMatch'] = findwhat
      else:
        query['elementMatch'] = findwhat.class_type
        query['restrictions'] = restrictions
      obj._params = {'modifyQuery': query}
    return obj

  @classmethod
  def range(cls, blip, begin, end):
    """Constructs an instance representing an explicitly set range."""
    obj = cls(blip)
    obj._begin = begin
    obj._end = end
    obj._hits = lambda: [(begin, end)]
    obj._params = {'range': {'start': begin, 'end': end}}
    return obj

  def _elem_matches(self, elem, clz, **restrictions):
    """Whether elem is an instance of clz matching all restrictions."""
    if not isinstance(elem, clz):
      return False
    for key, val in restrictions.items():
      if getattr(elem, key) != val:
        return False
    return True

  def _find(self, what, maxres=-1, **restrictions):
    """Iterates where 'what' occurs in the associated blip.

    What can be either a string or a class reference.
    Examples:
      self._find('hello') will return the first occurence of the word hello
      self._find(element.Gadget, url='http://example.com/gadget.xml')
          will return the first gadget that has as url example.com.

    Args:
      what: what to search for. Can be a class or a string. The class
          should be an element from element.py
      maxres: number of results to return at most, or <= 0 for all.
      restrictions: if what specifies a class, further restrictions
          of the found instances.

    Yields:
      Tuples indicating the range of the matches. For a one
      character/element match at position x, (x, x+1) is yielded.
    """
    blip = self._blip
    if what is None:
      # No search target: the whole blip is the single match.
      yield 0, len(blip)
      raise StopIteration
    if isinstance(what, basestring):
      idx = blip._content.find(what)
      count = 0
      while idx != -1:
        yield idx, idx + len(what)
        count += 1
        if count == maxres:
          raise StopIteration
        idx = blip._content.find(what, idx + len(what))
    else:
      count = 0
      for idx, el in blip._elements.items():
        if self._elem_matches(el, what, **restrictions):
          yield idx, idx + 1
          count += 1
          if count == maxres:
            raise StopIteration

  def _execute(self, modify_how, what, bundled_annotations=None):
    """Executes this BlipRefs object.

    Args:
      modify_how: What to do. Any of the operation declared at the top.
      what: Depending on the operation. For delete, has to be None.
        For the others it is a singleton, a list or a function returning
        what to do; for ANNOTATE tuples of (key, value), for the others
        either string or elements.
        If what is a function, it takes three parameters, the content of
        the blip, the beginning of the matching range and the end.
      bundled_annotations: Annotations to apply immediately.

    Raises:
      IndexError when trying to access content outside of the blip.
      ValueError when called with the wrong values.

    Returns:
      self for chainability.
    """
    blip = self._blip

    if modify_how != BlipRefs.DELETE:
      if type(what) != list:
        what = [what]
      next_index = 0
    matched = []

    # updated_elements is used to store the element type of the
    # element to update
    updated_elements = []

    # For now, if we find one markup, we'll use it everywhere.
    next = None
    hit_found = False

    for start, end in self._hits():
      hit_found = True
      # Negative indices count back from the end of the blip.
      if start < 0:
        start += len(blip)
        if end == 0:
          end += len(blip)
      if end < 0:
        end += len(blip)

      if len(blip) == 0:
        if start != 0 or end != 0:
          raise IndexError('Start and end have to be 0 for empty document')
      elif start < 0 or end < 1 or start >= len(blip) or end > len(blip):
        raise IndexError('Position outside the document')

      if modify_how == BlipRefs.DELETE:
        for i in range(start, end):
          if i in blip._elements:
            del blip._elements[i]
        blip._delete_annotations(start, end)
        blip._shift(end, start - end)
        blip._content = blip._content[:start] + blip._content[end:]
      else:
        if callable(what):
          next = what(blip._content, start, end)
          matched.append(next)
        else:
          # Cycle through the supplied values, one per hit.
          next = what[next_index]
          next_index = (next_index + 1) % len(what)
        if isinstance(next, str):
          next = util.force_unicode(next)

        if modify_how == BlipRefs.ANNOTATE:
          key, value = next
          blip.annotations._add_internal(key, value, start, end)
        elif modify_how == BlipRefs.CLEAR_ANNOTATION:
          blip.annotations._delete_internal(next, start, end)
        elif modify_how == BlipRefs.UPDATE_ELEMENT:
          el = blip._elements.get(start)
          # Fixed: previously this tested 'element' (the imported module,
          # always truthy) instead of 'el', making the error unreachable.
          if not el:
            raise ValueError('No element found at index %s' % start)
          # the passing around of types this way feels a bit dirty:
          updated_elements.append(element.Element.from_json(
              {'type': el.type, 'properties': next}))
          for k, b in next.items():
            setattr(el, k, b)
        else:
          if modify_how == BlipRefs.INSERT:
            end = start
          elif modify_how == BlipRefs.INSERT_AFTER:
            start = end
          elif modify_how == BlipRefs.REPLACE:
            pass
          else:
            raise ValueError('Unexpected modify_how: ' + modify_how)

          # Elements occupy a single placeholder space in the content.
          if isinstance(next, element.Element):
            text = ' '
          else:
            text = next

          # in the case of a replace, and the replacement text is shorter,
          # delete the delta.
          if start != end and len(text) < end - start:
            blip._delete_annotations(start + len(text), end)
          blip._shift(end, len(text) + start - end)
          blip._content = blip._content[:start] + text + blip._content[end:]

          if bundled_annotations:
            end_annotation = start + len(text)
            blip._delete_annotations(start, end_annotation)
            for key, value in bundled_annotations:
              blip.annotations._add_internal(key, value,
                                             start, end_annotation)

          if isinstance(next, element.Element):
            blip._elements[start] = next

    # No match found, return immediately without generating op.
    if not hit_found:
      return

    operation = blip._operation_queue.document_modify(blip.wave_id,
                                                      blip.wavelet_id,
                                                      blip.blip_id)
    for param, value in self._params.items():
      operation.set_param(param, value)

    modify_action = {'modifyHow': modify_how}
    if modify_how == BlipRefs.DELETE:
      pass
    elif modify_how == BlipRefs.UPDATE_ELEMENT:
      modify_action['elements'] = updated_elements
    elif (modify_how == BlipRefs.REPLACE or
          modify_how == BlipRefs.INSERT or
          modify_how == BlipRefs.INSERT_AFTER):
      if callable(what):
        what = matched
      if what:
        if not isinstance(next, element.Element):
          modify_action['values'] = [util.force_unicode(value)
                                     for value in what]
        else:
          modify_action['elements'] = what
    elif modify_how == BlipRefs.ANNOTATE:
      modify_action['values'] = [x[1] for x in what]
      modify_action['annotationKey'] = what[0][0]
    elif modify_how == BlipRefs.CLEAR_ANNOTATION:
      modify_action['annotationKey'] = what[0]

    if bundled_annotations:
      modify_action['bundledAnnotations'] = [
          {'key': key, 'value': value} for key, value in bundled_annotations]

    operation.set_param('modifyAction', modify_action)
    return self

  def insert(self, what, bundled_annotations=None):
    """Inserts what at the matched positions."""
    return self._execute(
        BlipRefs.INSERT, what, bundled_annotations=bundled_annotations)

  def insert_after(self, what, bundled_annotations=None):
    """Inserts what just after the matched positions."""
    return self._execute(
        BlipRefs.INSERT_AFTER, what, bundled_annotations=bundled_annotations)

  def replace(self, what, bundled_annotations=None):
    """Replaces the matched positions with what."""
    return self._execute(
        BlipRefs.REPLACE, what, bundled_annotations=bundled_annotations)

  def delete(self):
    """Deletes the content at the matched positions."""
    return self._execute(BlipRefs.DELETE, None)

  def annotate(self, name, value=None):
    """Annotates the content at the matched positions.

    You can either specify both name and value to set the same annotation,
    or supply as the first parameter something that yields name/value pairs.

    The name and value should both be strings.
    """
    if value is None:
      what = name
    else:
      what = (name, value)
    return self._execute(BlipRefs.ANNOTATE, what)

  def clear_annotation(self, name):
    """Clears the annotation at the matched positions."""
    return self._execute(BlipRefs.CLEAR_ANNOTATION, name)

  def update_element(self, new_values):
    """Update an existing element with a set of new values."""
    return self._execute(BlipRefs.UPDATE_ELEMENT, new_values)

  def __nonzero__(self):
    """Return whether we have a value."""
    for start, end in self._hits():
      return True
    return False

  def value(self):
    """Convenience method to convert a BlipRefs to value of its first match."""
    for start, end in self._hits():
      if end - start == 1 and start in self._blip._elements:
        return self._blip._elements[start]
      else:
        return self._blip.text[start:end]
    raise ValueError('BlipRefs has no values')

  def __getattr__(self, attribute):
    """Mirror the getattr of value().

    This allows for clever things like
      first(IMAGE).url
    or
      blip.annotate_with(key, value).upper()
    """
    return getattr(self.value(), attribute)

  def __radd__(self, other):
    """Make it possible to add this to a string."""
    return other + self.value()

  def __cmp__(self, other):
    """Support comparision with target."""
    return cmp(self.value(), other)

  def __iter__(self):
    for start_end in self._hits():
      yield start_end


class Blip(object):
  """Models a single blip instance.

  Blips are essentially the documents that make up a conversation. Blips can
  live in a hierarchy of blips. A root blip has no parent blip id, but all
  blips have the ids of the wave and wavelet that they are associated with.

  Blips also contain annotations, content and elements, which are accessed via
  the Document object.
  """

  def __init__(self, json, other_blips, operation_queue):
    """Inits this blip with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      other_blips: A dictionary like object that can be used to resolve ids
          of blips to blips.
      operation_queue: an OperationQueue object to store generated
          operations in.
    """
    self._blip_id = json.get('blipId')
    self._operation_queue = operation_queue
    self._child_blip_ids = set(json.get('childBlipIds', []))
    self._content = json.get('content', '')
    self._contributors = set(json.get('contributors', []))
    self._creator = json.get('creator')
    self._last_modified_time = json.get('lastModifiedTime', 0)
    self._version = json.get('version', 0)
    self._parent_blip_id = json.get('parentBlipId')
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    if isinstance(other_blips, Blips):
      self._other_blips = other_blips
    else:
      self._other_blips = Blips(other_blips)
    self._annotations = Annotations(operation_queue, self)
    for annjson in json.get('annotations', []):
      r = annjson['range']
      self._annotations._add_internal(annjson['name'],
                                      annjson['value'],
                                      r['start'],
                                      r['end'])
    self._elements = {}
    json_elements = json.get('elements', {})
    for elem in json_elements:
      # JSON keys are strings; element positions are integer indices.
      self._elements[int(elem)] = element.Element.from_json(
          json_elements[elem])
    self.raw_data = json

  @property
  def blip_id(self):
    """The id of this blip."""
    return self._blip_id

  @property
  def wave_id(self):
    """The id of the wave that this blip belongs to."""
    return self._wave_id

  @property
  def wavelet_id(self):
    """The id of the wavelet that this blip belongs to."""
    return self._wavelet_id

  @property
  def child_blip_ids(self):
    """The set of the ids of this blip's children."""
    return self._child_blip_ids

  @property
  def child_blips(self):
    """The set of blips that are children of this blip."""
    return set([self._other_blips[blid_id]
                for blid_id in self._child_blip_ids
                if blid_id in self._other_blips])

  @property
  def contributors(self):
    """The set of participant ids that contributed to this blip."""
    return self._contributors

  @property
  def creator(self):
    """The id of the participant that created this blip."""
    return self._creator

  @property
  def last_modified_time(self):
    """The time in seconds since epoch when this blip was last modified."""
    return self._last_modified_time

  @property
  def version(self):
    """The version of this blip."""
    return self._version

  @property
  def parent_blip_id(self):
    """The parent blip_id or None if this is the root blip."""
    return self._parent_blip_id

  @property
  def parent_blip(self):
    """The parent blip or None if it is the root."""
    # if parent_blip_id is None, get will also return None
    return self._other_blips.get(self._parent_blip_id)

  @property
  def inline_blip_offset(self):
    """The offset in the parent if this blip is inline or -1 if not.

    If the parent is not in the context, this function will always
    return -1 since it can't determine the inline blip status.
    """
    parent = self.parent_blip
    if not parent:
      return -1
    for offset, el in parent._elements.items():
      if el.type == element.Element.INLINE_BLIP_TYPE and el.id == self.blip_id:
        return offset
    return -1

  def is_root(self):
    """Returns whether this is the root blip of a wavelet."""
    return self._parent_blip_id is None

  @property
  def annotations(self):
    """The annotations for this document."""
    return self._annotations

  @property
  def elements(self):
    """Returns a list of elements for this document.

    The elements of a blip are things like forms elements and gadgets that
    cannot be expressed as plain text. In the text of the blip, you'll
    typically find a space as a place holder for the element.

    If you want to retrieve the element at a particular index in the blip,
    use blip[index].value().
    """
    return self._elements.values()

  def __len__(self):
    return len(self._content)

  def __getitem__(self, item):
    """returns a BlipRefs for the given slice."""
    if isinstance(item, slice):
      if item.step:
        raise errors.Error('Step not supported for blip slices')
      return self.range(item.start, item.stop)
    else:
      return self.at(item)

  def __setitem__(self, item, value):
    """short cut for self.range/at().replace(value)."""
    self.__getitem__(item).replace(value)

  def __delitem__(self, item):
    """short cut for self.range/at().delete()."""
    self.__getitem__(item).delete()

  def _shift(self, where, inc):
    """Move element and annotations after 'where' up by 'inc'."""
    new_elements = {}
    for idx, el in self._elements.items():
      if idx >= where:
        idx += inc
      new_elements[idx] = el
    self._elements = new_elements
    self._annotations._shift(where, inc)

  def _delete_annotations(self, start, end):
    """Delete all annotations between 'start' and 'end'."""
    for annotation_name in self._annotations.names():
      self._annotations._delete_internal(annotation_name, start, end)

  def all(self, findwhat=None, maxres=-1, **restrictions):
    """Returns a BlipRefs object representing all results for the search.

    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, maxres, **restrictions)

  def first(self, findwhat=None, **restrictions):
    """Returns a BlipRefs object representing the first result for the search.

    If searching for an element, the restrictions can be used to specify
    additional element properties to filter on, like the url of a Gadget.
    """
    return BlipRefs.all(self, findwhat, 1, **restrictions)

  def at(self, index):
    """Returns a BlipRefs object representing a 1-character range."""
    return BlipRefs.range(self, index, index + 1)

  def range(self, start, end):
    """Returns a BlipRefs object representing the range."""
    return BlipRefs.range(self, start, end)

  def serialize(self):
    """Return a dictionary representation of this blip ready for json."""
    return {'blipId': self._blip_id,
            'childBlipIds': list(self._child_blip_ids),
            'content': self._content,
            'creator': self._creator,
            'contributors': list(self._contributors),
            'lastModifiedTime': self._last_modified_time,
            'version': self._version,
            'parentBlipId': self._parent_blip_id,
            'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'annotations': self._annotations.serialize(),
            'elements': dict([(index, e.serialize())
                              for index, e in self._elements.items()])
           }

  def proxy_for(self, proxy_for_id):
    """Return a view on this blip that will proxy for the specified id.

    A shallow copy of the current blip is returned with the proxy_for_id
    set. Any modifications made to this copy will be done using the
    proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
    be used.
    """
    operation_queue = self._operation_queue.proxy_for(proxy_for_id)
    res = Blip(json={},
               other_blips={},
               operation_queue=operation_queue)
    # Share (not copy) the underlying state with the proxying view.
    res._blip_id = self._blip_id
    res._child_blip_ids = self._child_blip_ids
    res._content = self._content
    res._contributors = self._contributors
    res._creator = self._creator
    res._last_modified_time = self._last_modified_time
    res._version = self._version
    res._parent_blip_id = self._parent_blip_id
    res._wave_id = self._wave_id
    res._wavelet_id = self._wavelet_id
    res._other_blips = self._other_blips
    res._annotations = self._annotations
    res._elements = self._elements
    res.raw_data = self.raw_data
    return res

  @property
  def text(self):
    """Returns the raw text content of this document."""
    return self._content

  def find(self, what, **restrictions):
    """Iterate to matching bits of contents.

    Yield either elements or pieces of text.
    """
    br = BlipRefs.all(self, what, **restrictions)
    for start, end in br._hits():
      if end - start == 1 and start in self._elements:
        yield self._elements[start]
      else:
        yield self._content[start:end]
    raise StopIteration

  def append(self, what, bundled_annotations=None):
    """Convenience method covering a common pattern."""
    return BlipRefs.all(self, findwhat=None).insert_after(
        what, bundled_annotations=bundled_annotations)

  def reply(self):
    """Create and return a reply to this blip."""
    blip_data = self._operation_queue.blip_create_child(self.wave_id,
                                                        self.wavelet_id,
                                                        self.blip_id)
    new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
    self._other_blips._add(new_blip)
    return new_blip

  def append_markup(self, markup):
    """Interpret the markup text as xhtml and append the result to the doc.

    Args:
      markup: The markup'ed text to append.
    """
    markup = util.force_unicode(markup)
    self._operation_queue.document_append_markup(self.wave_id,
                                                 self.wavelet_id,
                                                 self.blip_id,
                                                 markup)
    self._content += util.parse_markup(markup)

  def insert_inline_blip(self, position):
    """Inserts an inline blip into this blip at a specific position.

    Args:
      position: Position to insert the blip at. This has to be greater
          than 0.

    Returns:
      The JSON data of the blip that was created.

    Raises:
      IndexError: if position is not greater than 0.
    """
    if position <= 0:
      raise IndexError(('Illegal inline blip position: %d. Position has to '
                        'be greater than 0.') % position)
    blip_data = self._operation_queue.document_inline_blip_insert(
        self.wave_id,
        self.wavelet_id,
        self.blip_id,
        position)
    new_blip = Blip(blip_data, self._other_blips, self._operation_queue)
    self._other_blips._add(new_blip)
    return new_blip
[ [ 1, 0, 0.0191, 0.0011, 0, 0.66, 0, 736, 0, 1, 0, 0, 736, 0, 0 ], [ 1, 0, 0.0202, 0.0011, 0, 0.66, 0.1429, 841, 0, 1, 0, 0, 841, 0, 0 ], [ 1, 0, 0.0225, 0.0011, 0, ...
[ "import element", "import errors", "import util", "class Annotation(object):\n \"\"\"Models an annotation on a document.\n\n Annotations are key/value pairs over a range of content. Annotations\n can be used to store data or to be interpreted by a client when displaying\n the data.\n \"\"\"", " \"\"\"...
#!/usr/bin/python2.4 # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the robot module.""" import unittest import events import ops import robot import simplejson BLIP_JSON = ('{"wdykLROk*13":' '{"lastModifiedTime":1242079608457,' '"contributors":["someguy@test.com"],' '"waveletId":"test.com!conv+root",' '"waveId":"test.com!wdykLROk*11",' '"parentBlipId":null,' '"version":3,' '"creator":"someguy@test.com",' '"content":"\\nContent!",' '"blipId":"wdykLROk*13","' 'annotations":[{"range":{"start":0,"end":1},' '"name":"user/e/davidbyttow@google.com","value":"David"}],' '"elements":{},' '"childBlipIds":[]}' '}') WAVELET_JSON = ('{"lastModifiedTime":1242079611003,' '"title":"A title",' '"waveletId":"test.com!conv+root",' '"rootBlipId":"wdykLROk*13",' '"dataDocuments":null,' '"creationTime":1242079608457,' '"waveId":"test.com!wdykLROk*11",' '"participants":["someguy@test.com","monty@appspot.com"],' '"creator":"someguy@test.com",' '"version":5}') EVENTS_JSON = ('[{"timestamp":1242079611003,' '"modifiedBy":"someguy@test.com",' '"properties":{"participantsRemoved":[],' '"participantsAdded":["monty@appspot.com"]},' '"type":"WAVELET_PARTICIPANTS_CHANGED"}]') TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % ( BLIP_JSON, WAVELET_JSON, EVENTS_JSON) NEW_WAVE_JSON = [{"data": {"waveletId": "wavesandbox.com!conv+root", "blipId": "b+LrODcLZkDlu", "waveId": "wavesandbox.com!w+LrODcLZkDlt"}, "id": "op2"}] NEW_WAVE_JSON_OLD = [{'data': [{'data': 
{'waveletId': 'googlewave.com!conv+root', 'blipId': 'b+VqQXQbZkCP1', 'waveId': 'googlewave.com!w+VqQXQbZkCP0'}, 'id': 'wavelet.create1265055048410'}], 'id': 'op10'}]; class TestRobot(unittest.TestCase): """Tests for testing the basic parsing of json in robots.""" def setUp(self): self.robot = robot.Robot('Testy') def testCreateWave(self): self.robot.submit = lambda x: NEW_WAVE_JSON new_wave = self.robot.new_wave('wavesandbox.com', submit=True) self.assertEqual('wavesandbox.com!w+LrODcLZkDlt', new_wave.wave_id) self.robot.submit = lambda x: NEW_WAVE_JSON_OLD new_wave = self.robot.new_wave('googlewave.com', submit=True) self.assertEqual('googlewave.com!w+VqQXQbZkCP0', new_wave.wave_id) def testEventParsing(self): def check(event, wavelet): # Test some basic properties; the rest should be covered by # ops.CreateContext. root = wavelet.root_blip self.assertEqual(1, len(wavelet.blips)) self.assertEqual('wdykLROk*13', root.blip_id) self.assertEqual('test.com!wdykLROk*11', root.wave_id) self.assertEqual('test.com!conv+root', root.wavelet_id) self.assertEqual('WAVELET_PARTICIPANTS_CHANGED', event.type) self.assertEqual({'participantsRemoved': [], 'participantsAdded': ['monty@appspot.com']}, event.properties) self.robot.test_called = True self.robot.test_called = False self.robot.register_handler(events.WaveletParticipantsChanged, check) json = self.robot.process_events(TEST_JSON) self.assertTrue(self.robot.test_called) operations = simplejson.loads(json) # there should be one operation indicating the current version: self.assertEqual(1, len(operations)) def testWrongEventsIgnored(self): self.robot.test_called = True def check(event, wavelet): called = True self.robot.test_called = False self.robot.register_handler(events.BlipSubmitted, check) self.robot.process_events(TEST_JSON) self.assertFalse(self.robot.test_called) def testOperationParsing(self): def check(event, wavelet): wavelet.reply() wavelet.title = 'new title' wavelet.root_blip.append_markup('<b>Hello</b>') 
self.robot.register_handler(events.WaveletParticipantsChanged, check) json = self.robot.process_events(TEST_JSON) operations = simplejson.loads(json) expected = set([ops.ROBOT_NOTIFY_CAPABILITIES_HASH, ops.WAVELET_APPEND_BLIP, ops.WAVELET_SET_TITLE, ops.DOCUMENT_APPEND_MARKUP]) methods = [operation['method'] for operation in operations] for method in methods: self.assertTrue(method in expected) expected.remove(method) self.assertEquals(0, len(expected)) def testSerializeWavelets(self): wavelet = self.robot.blind_wavelet(TEST_JSON) serialized = wavelet.serialize() unserialized = self.robot.blind_wavelet(serialized) self.assertEquals(wavelet.creator, unserialized.creator) self.assertEquals(wavelet.creation_time, unserialized.creation_time) self.assertEquals(wavelet.last_modified_time, unserialized.last_modified_time) self.assertEquals(wavelet.root_blip.blip_id, unserialized.root_blip.blip_id) self.assertEquals(wavelet.title, unserialized.title) self.assertEquals(wavelet.wave_id, unserialized.wave_id) self.assertEquals(wavelet.wavelet_id, unserialized.wavelet_id) self.assertEquals(wavelet.domain, unserialized.domain) def testProxiedBlindWavelet(self): def handler(event, wavelet): blind_wavelet = self.robot.blind_wavelet(TEST_JSON, 'proxyid') blind_wavelet.reply() blind_wavelet.submit_with(wavelet) self.robot.register_handler(events.WaveletParticipantsChanged, handler) json = self.robot.process_events(TEST_JSON) operations = simplejson.loads(json) self.assertEqual(2, len(operations)) self.assertEquals(ops.ROBOT_NOTIFY_CAPABILITIES_HASH, operations[0]['method']) self.assertEquals(ops.WAVELET_APPEND_BLIP, operations[1]['method']) self.assertEquals('proxyid', operations[1]['params']['proxyingFor']) def testCapabilitiesHashIncludesContextAndFilter(self): robot1 = robot.Robot('Robot1') robot1.register_handler(events.WaveletSelfAdded, lambda: '') robot2 = robot.Robot('Robot2') robot2.register_handler(events.WaveletSelfAdded, lambda: '', context=events.Context.ALL) 
self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash()) robot3 = robot.Robot('Robot3') robot2.register_handler(events.WaveletSelfAdded, lambda: '', context=events.Context.ALL, filter="foo") self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash()) self.assertNotEqual(robot1.capabilities_hash(), robot3.capabilities_hash()) self.assertNotEqual(robot2.capabilities_hash(), robot3.capabilities_hash()) class TestGetCapabilitiesXml(unittest.TestCase): def setUp(self): self.robot = robot.Robot('Testy') self.robot.capabilities_hash = lambda: '1' def assertStringsEqual(self, s1, s2): self.assertEqual(s1, s2, 'Strings differ:\n%s--\n%s' % (s1, s2)) def testDefault(self): expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' '</w:robot>\n') % ops.PROTOCOL_VERSION xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testUrls(self): profile_robot = robot.Robot( 'Testy', image_url='http://example.com/image.png', profile_url='http://example.com/profile.xml') profile_robot.capabilities_hash = lambda: '1' expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' '</w:robot>\n') % ops.PROTOCOL_VERSION xml = profile_robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testConsumerKey(self): # setup_oauth doesn't work during testing, so heavy handed setting of # properties it is: self.robot._consumer_key = 'consumer' expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:consumer_key>consumer</w:consumer_key>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' 
'</w:robot>\n') % ops.PROTOCOL_VERSION xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testCapsAndEvents(self): self.robot.register_handler(events.BlipSubmitted, None, context=[events.Context.SELF, events.Context.ROOT]) expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n' ' <w:capability name="%s" context="SELF,ROOT"/>\n' '</w:capabilities>\n' '</w:robot>\n') % (ops.PROTOCOL_VERSION, events.BlipSubmitted.type) xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) if __name__ == '__main__': unittest.main()
[ [ 8, 0, 0.0651, 0.0038, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0728, 0.0038, 0, 0.66, 0.0714, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0805, 0.0038, 0, 0.66, ...
[ "\"\"\"Unit tests for the robot module.\"\"\"", "import unittest", "import events", "import ops", "import robot", "import simplejson", "BLIP_JSON = ('{\"wdykLROk*13\":'\n '{\"lastModifiedTime\":1242079608457,'\n '\"contributors\":[\"someguy@test.com\"],'\n '\"wa...
#!/usr/bin/python2.4 # # Copyright 2009 Google Inc. All Rights Reserved. """Run robot from the commandline for testing. This robot_runner let's you define event handlers using flags and takes the json input from the std in and writes out the json output to stdout. for example cat events | commandline_robot_runner.py \ --eventdef-blip_submitted="wavelet.title='title'" """ __author__ = 'douwe@google.com (Douwe Osinga)' import sys import urllib from google3.pyglib import app from google3.pyglib import flags from google3.walkabout.externalagents import api from google3.walkabout.externalagents.api import blip from google3.walkabout.externalagents.api import element from google3.walkabout.externalagents.api import errors from google3.walkabout.externalagents.api import events from google3.walkabout.externalagents.api import ops from google3.walkabout.externalagents.api import robot from google3.walkabout.externalagents.api import util FLAGS = flags.FLAGS for event in events.ALL: flags.DEFINE_string('eventdef_' + event.type.lower(), '', 'Event definition for the %s event' % event.type) def handle_event(src, bot, e, w): """Handle an event by executing the source code src.""" globs = {'e': e, 'w': w, 'api': api, 'bot': bot, 'blip': blip, 'element': element, 'errors': errors, 'events': events, 'ops': ops, 'robot': robot, 'util': util} exec src in globs def run_bot(input_file, output_file): """Run a robot defined on the command line.""" cmdbot = robot.Robot('Commandline bot') for event in events.ALL: src = getattr(FLAGS, 'eventdef_' + event.type.lower()) src = urllib.unquote_plus(src) if src: cmdbot.register_handler(event, lambda event, wavelet, src=src, bot=cmdbot: handle_event(src, bot, event, wavelet)) json_body = unicode(input_file.read(), 'utf8') json_response = cmdbot.process_events(json_body) output_file.write(json_response) def main(argv): run_bot(sys.stdin, sys.stdout) if __name__ == '__main__': app.run()
[ [ 8, 0, 0.1304, 0.1304, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2174, 0.0145, 0, 0.66, 0.0526, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.2464, 0.0145, 0, 0.66,...
[ "\"\"\"Run robot from the commandline for testing.\n\nThis robot_runner let's you define event handlers using flags and takes the\njson input from the std in and writes out the json output to stdout.\n\nfor example\n cat events | commandline_robot_runner.py \\\n --eventdef-blip_submitted=\"wavelet.title='titl...
#!/usr/bin/python2.4 # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the robot module.""" import unittest import events import ops import robot import simplejson BLIP_JSON = ('{"wdykLROk*13":' '{"lastModifiedTime":1242079608457,' '"contributors":["someguy@test.com"],' '"waveletId":"test.com!conv+root",' '"waveId":"test.com!wdykLROk*11",' '"parentBlipId":null,' '"version":3,' '"creator":"someguy@test.com",' '"content":"\\nContent!",' '"blipId":"wdykLROk*13","' 'annotations":[{"range":{"start":0,"end":1},' '"name":"user/e/davidbyttow@google.com","value":"David"}],' '"elements":{},' '"childBlipIds":[]}' '}') WAVELET_JSON = ('{"lastModifiedTime":1242079611003,' '"title":"A title",' '"waveletId":"test.com!conv+root",' '"rootBlipId":"wdykLROk*13",' '"dataDocuments":null,' '"creationTime":1242079608457,' '"waveId":"test.com!wdykLROk*11",' '"participants":["someguy@test.com","monty@appspot.com"],' '"creator":"someguy@test.com",' '"version":5}') EVENTS_JSON = ('[{"timestamp":1242079611003,' '"modifiedBy":"someguy@test.com",' '"properties":{"participantsRemoved":[],' '"participantsAdded":["monty@appspot.com"]},' '"type":"WAVELET_PARTICIPANTS_CHANGED"}]') TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % ( BLIP_JSON, WAVELET_JSON, EVENTS_JSON) NEW_WAVE_JSON = [{"data": {"waveletId": "wavesandbox.com!conv+root", "blipId": "b+LrODcLZkDlu", "waveId": "wavesandbox.com!w+LrODcLZkDlt"}, "id": "op2"}] NEW_WAVE_JSON_OLD = [{'data': [{'data': 
{'waveletId': 'googlewave.com!conv+root', 'blipId': 'b+VqQXQbZkCP1', 'waveId': 'googlewave.com!w+VqQXQbZkCP0'}, 'id': 'wavelet.create1265055048410'}], 'id': 'op10'}]; class TestRobot(unittest.TestCase): """Tests for testing the basic parsing of json in robots.""" def setUp(self): self.robot = robot.Robot('Testy') def testCreateWave(self): self.robot.submit = lambda x: NEW_WAVE_JSON new_wave = self.robot.new_wave('wavesandbox.com', submit=True) self.assertEqual('wavesandbox.com!w+LrODcLZkDlt', new_wave.wave_id) self.robot.submit = lambda x: NEW_WAVE_JSON_OLD new_wave = self.robot.new_wave('googlewave.com', submit=True) self.assertEqual('googlewave.com!w+VqQXQbZkCP0', new_wave.wave_id) def testEventParsing(self): def check(event, wavelet): # Test some basic properties; the rest should be covered by # ops.CreateContext. root = wavelet.root_blip self.assertEqual(1, len(wavelet.blips)) self.assertEqual('wdykLROk*13', root.blip_id) self.assertEqual('test.com!wdykLROk*11', root.wave_id) self.assertEqual('test.com!conv+root', root.wavelet_id) self.assertEqual('WAVELET_PARTICIPANTS_CHANGED', event.type) self.assertEqual({'participantsRemoved': [], 'participantsAdded': ['monty@appspot.com']}, event.properties) self.robot.test_called = True self.robot.test_called = False self.robot.register_handler(events.WaveletParticipantsChanged, check) json = self.robot.process_events(TEST_JSON) self.assertTrue(self.robot.test_called) operations = simplejson.loads(json) # there should be one operation indicating the current version: self.assertEqual(1, len(operations)) def testWrongEventsIgnored(self): self.robot.test_called = True def check(event, wavelet): called = True self.robot.test_called = False self.robot.register_handler(events.BlipSubmitted, check) self.robot.process_events(TEST_JSON) self.assertFalse(self.robot.test_called) def testOperationParsing(self): def check(event, wavelet): wavelet.reply() wavelet.title = 'new title' wavelet.root_blip.append_markup('<b>Hello</b>') 
self.robot.register_handler(events.WaveletParticipantsChanged, check) json = self.robot.process_events(TEST_JSON) operations = simplejson.loads(json) expected = set([ops.ROBOT_NOTIFY_CAPABILITIES_HASH, ops.WAVELET_APPEND_BLIP, ops.WAVELET_SET_TITLE, ops.DOCUMENT_APPEND_MARKUP]) methods = [operation['method'] for operation in operations] for method in methods: self.assertTrue(method in expected) expected.remove(method) self.assertEquals(0, len(expected)) def testSerializeWavelets(self): wavelet = self.robot.blind_wavelet(TEST_JSON) serialized = wavelet.serialize() unserialized = self.robot.blind_wavelet(serialized) self.assertEquals(wavelet.creator, unserialized.creator) self.assertEquals(wavelet.creation_time, unserialized.creation_time) self.assertEquals(wavelet.last_modified_time, unserialized.last_modified_time) self.assertEquals(wavelet.root_blip.blip_id, unserialized.root_blip.blip_id) self.assertEquals(wavelet.title, unserialized.title) self.assertEquals(wavelet.wave_id, unserialized.wave_id) self.assertEquals(wavelet.wavelet_id, unserialized.wavelet_id) self.assertEquals(wavelet.domain, unserialized.domain) def testProxiedBlindWavelet(self): def handler(event, wavelet): blind_wavelet = self.robot.blind_wavelet(TEST_JSON, 'proxyid') blind_wavelet.reply() blind_wavelet.submit_with(wavelet) self.robot.register_handler(events.WaveletParticipantsChanged, handler) json = self.robot.process_events(TEST_JSON) operations = simplejson.loads(json) self.assertEqual(2, len(operations)) self.assertEquals(ops.ROBOT_NOTIFY_CAPABILITIES_HASH, operations[0]['method']) self.assertEquals(ops.WAVELET_APPEND_BLIP, operations[1]['method']) self.assertEquals('proxyid', operations[1]['params']['proxyingFor']) def testCapabilitiesHashIncludesContextAndFilter(self): robot1 = robot.Robot('Robot1') robot1.register_handler(events.WaveletSelfAdded, lambda: '') robot2 = robot.Robot('Robot2') robot2.register_handler(events.WaveletSelfAdded, lambda: '', context=events.Context.ALL) 
self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash()) robot3 = robot.Robot('Robot3') robot2.register_handler(events.WaveletSelfAdded, lambda: '', context=events.Context.ALL, filter="foo") self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash()) self.assertNotEqual(robot1.capabilities_hash(), robot3.capabilities_hash()) self.assertNotEqual(robot2.capabilities_hash(), robot3.capabilities_hash()) class TestGetCapabilitiesXml(unittest.TestCase): def setUp(self): self.robot = robot.Robot('Testy') self.robot.capabilities_hash = lambda: '1' def assertStringsEqual(self, s1, s2): self.assertEqual(s1, s2, 'Strings differ:\n%s--\n%s' % (s1, s2)) def testDefault(self): expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' '</w:robot>\n') % ops.PROTOCOL_VERSION xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testUrls(self): profile_robot = robot.Robot( 'Testy', image_url='http://example.com/image.png', profile_url='http://example.com/profile.xml') profile_robot.capabilities_hash = lambda: '1' expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' '</w:robot>\n') % ops.PROTOCOL_VERSION xml = profile_robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testConsumerKey(self): # setup_oauth doesn't work during testing, so heavy handed setting of # properties it is: self.robot._consumer_key = 'consumer' expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:consumer_key>consumer</w:consumer_key>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' 
'</w:robot>\n') % ops.PROTOCOL_VERSION xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testCapsAndEvents(self): self.robot.register_handler(events.BlipSubmitted, None, context=[events.Context.SELF, events.Context.ROOT]) expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n' ' <w:capability name="%s" context="SELF,ROOT"/>\n' '</w:capabilities>\n' '</w:robot>\n') % (ops.PROTOCOL_VERSION, events.BlipSubmitted.type) xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) if __name__ == '__main__': unittest.main()
[ [ 8, 0, 0.0651, 0.0038, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0728, 0.0038, 0, 0.66, 0.0714, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0805, 0.0038, 0, 0.66, ...
[ "\"\"\"Unit tests for the robot module.\"\"\"", "import unittest", "import events", "import ops", "import robot", "import simplejson", "BLIP_JSON = ('{\"wdykLROk*13\":'\n '{\"lastModifiedTime\":1242079608457,'\n '\"contributors\":[\"someguy@test.com\"],'\n '\"wa...
#!/usr/bin/python2.4 # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the wavelet module.""" import unittest import blip import element import ops import wavelet import simplejson ROBOT_NAME = 'robot@appspot.com' TEST_WAVELET_DATA = { 'creator': ROBOT_NAME, 'creationTime': 100, 'lastModifiedTime': 101, 'participants': [ROBOT_NAME], 'participantsRoles': {ROBOT_NAME: wavelet.Participants.ROLE_FULL}, 'rootBlipId': 'blip-1', 'title': 'Title', 'waveId': 'test.com!w+g3h3im', 'waveletId': 'test.com!root+conv', 'tags': ['tag1', 'tag2'], } TEST_BLIP_DATA = { 'blipId': TEST_WAVELET_DATA['rootBlipId'], 'childBlipIds': [], 'content': '\ntesting', 'contributors': [TEST_WAVELET_DATA['creator'], 'robot@google.com'], 'creator': TEST_WAVELET_DATA['creator'], 'lastModifiedTime': TEST_WAVELET_DATA['lastModifiedTime'], 'parentBlipId': None, 'waveId': TEST_WAVELET_DATA['waveId'], 'elements': {}, 'waveletId': TEST_WAVELET_DATA['waveletId'], } class TestWavelet(unittest.TestCase): """Tests the wavelet class.""" def setUp(self): self.operation_queue = ops.OperationQueue() self.all_blips = {} self.blip = blip.Blip(TEST_BLIP_DATA, self.all_blips, self.operation_queue) self.all_blips[self.blip.blip_id] = self.blip self.wavelet = wavelet.Wavelet(TEST_WAVELET_DATA, self.all_blips, None, self.operation_queue) self.wavelet.robot_address = ROBOT_NAME def testWaveletProperties(self): w = self.wavelet self.assertEquals(TEST_WAVELET_DATA['creator'], w.creator) 
self.assertEquals(TEST_WAVELET_DATA['creationTime'], w.creation_time) self.assertEquals(TEST_WAVELET_DATA['lastModifiedTime'], w.last_modified_time) self.assertEquals(len(TEST_WAVELET_DATA['participants']), len(w.participants)) self.assertTrue(TEST_WAVELET_DATA['participants'][0] in w.participants) self.assertEquals(TEST_WAVELET_DATA['rootBlipId'], w.root_blip.blip_id) self.assertEquals(TEST_WAVELET_DATA['title'], w.title) self.assertEquals(TEST_WAVELET_DATA['waveId'], w.wave_id) self.assertEquals(TEST_WAVELET_DATA['waveletId'], w.wavelet_id) self.assertEquals('test.com', w.domain) def testWaveletMethods(self): w = self.wavelet reply = w.reply() self.assertEquals(2, len(w.blips)) w.delete(reply) self.assertEquals(1, len(w.blips)) self.assertEquals(0, len(w.data_documents)) self.wavelet.data_documents['key'] = 'value' self.assert_('key' in w.data_documents) self.assertEquals(1, len(w.data_documents)) for key in w.data_documents: self.assertEquals(key, 'key') self.assertEquals(1, len(w.data_documents.keys())) self.wavelet.data_documents['key'] = None self.assertEquals(0, len(w.data_documents)) num_participants = len(w.participants) w.proxy_for('proxy').reply() self.assertEquals(2, len(w.blips)) # check that the new proxy for participant was added self.assertEquals(num_participants + 1, len(w.participants)) w._robot_address = ROBOT_NAME.replace('@', '+proxy@') w.proxy_for('proxy').reply() self.assertEquals(num_participants + 1, len(w.participants)) self.assertEquals(3, len(w.blips)) def testSetTitle(self): self.blip._content = '\nOld title\n\nContent' self.wavelet.title = 'New title \xd0\xb0\xd0\xb1\xd0\xb2' self.assertEquals(1, len(self.operation_queue)) self.assertEquals('wavelet.setTitle', self.operation_queue.serialize()[1]['method']) self.assertEquals(u'\nNew title \u0430\u0431\u0432\n\nContent', self.blip._content) def testSetTitleAdjustRootBlipWithOneLineProperly(self): self.blip._content = '\nOld title' self.wavelet.title = 'New title' self.assertEquals(1, 
len(self.operation_queue)) self.assertEquals('wavelet.setTitle', self.operation_queue.serialize()[1]['method']) self.assertEquals('\nNew title\n', self.blip._content) def testSetTitleAdjustEmptyRootBlipProperly(self): self.blip._content = '\n' self.wavelet.title = 'New title' self.assertEquals(1, len(self.operation_queue)) self.assertEquals('wavelet.setTitle', self.operation_queue.serialize()[1]['method']) self.assertEquals('\nNew title\n', self.blip._content) def testTags(self): w = self.wavelet self.assertEquals(2, len(w.tags)) w.tags.append('tag3') self.assertEquals(3, len(w.tags)) w.tags.append('tag3') self.assertEquals(3, len(w.tags)) w.tags.remove('tag1') self.assertEquals(2, len(w.tags)) self.assertEquals('tag2', w.tags[0]) def testParticipantRoles(self): w = self.wavelet self.assertEquals(wavelet.Participants.ROLE_FULL, w.participants.get_role(ROBOT_NAME)) w.participants.set_role(ROBOT_NAME, wavelet.Participants.ROLE_READ_ONLY) self.assertEquals(wavelet.Participants.ROLE_READ_ONLY, w.participants.get_role(ROBOT_NAME)) def testSerialize(self): self.blip.append(element.Gadget('http://test.com', {'a': 3})) self.wavelet.title = 'A wavelet title' self.blip.append(element.Image(url='http://www.google.com/logos/clickortreat1.gif', width=320, height=118)) self.blip.append(element.Attachment(caption='fake', data='fake data')) self.blip.append(element.Line(line_type='li', indent='2')) self.blip.append('bulleted!') self.blip.append(element.Installer( 'http://wave-skynet.appspot.com/public/extensions/areyouin/manifest.xml')) self.wavelet.proxy_for('proxy').reply().append('hi from douwe') inlineBlip = self.blip.insert_inline_blip(5) inlineBlip.append('hello again!') serialized = self.wavelet.serialize() serialized = simplejson.dumps(serialized) self.assertTrue(serialized.find('test.com') > 0) if __name__ == '__main__': unittest.main()
[ [ 8, 0, 0.096, 0.0056, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.113, 0.0056, 0, 0.66, 0.0909, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.1243, 0.0056, 0, 0.66, ...
[ "\"\"\"Unit tests for the wavelet module.\"\"\"", "import unittest", "import blip", "import element", "import ops", "import wavelet", "import simplejson", "ROBOT_NAME = 'robot@appspot.com'", "TEST_WAVELET_DATA = {\n 'creator': ROBOT_NAME,\n 'creationTime': 100,\n 'lastModifiedTime': 101,\n ...
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains various API-specific exception classes. This module contains various specific exception classes that are raised by the library back to the client. """ class Error(Exception): """Base library error type."""
[ [ 8, 0, 0.76, 0.2, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.98, 0.08, 0, 0.66, 1, 529, 0, 0, 0, 0, 645, 0, 0 ], [ 8, 1, 1, 0.04, 1, 0.97, 0, 0, 1...
[ "\"\"\"Contains various API-specific exception classes.\n\nThis module contains various specific exception classes that are raised by\nthe library back to the client.\n\"\"\"", "class Error(Exception):\n \"\"\"Base library error type.\"\"\"", " \"\"\"Base library error type.\"\"\"" ]
#!/usr/bin/python2.4 # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines classes that are needed to model a wavelet.""" import blip import errors import util class DataDocs(object): """Class modeling a bunch of data documents in pythonic way.""" def __init__(self, init_docs, wave_id, wavelet_id, operation_queue): self._docs = init_docs self._wave_id = wave_id self._wavelet_id = wavelet_id self._operation_queue = operation_queue def __iter__(self): return self._docs.__iter__() def __contains__(self, key): return key in self._docs def __delitem__(self, key): if not key in self._docs: return self._operation_queue.wavelet_datadoc_set( self._wave_id, self._wavelet_id, key, None) del self._docs[key] def __getitem__(self, key): return self._docs[key] def __setitem__(self, key, value): self._operation_queue.wavelet_datadoc_set( self._wave_id, self._wavelet_id, key, value) if value is None and key in self._docs: del self._docs[key] else: self._docs[key] = value def __len__(self): return len(self._docs) def keys(self): return self._docs.keys() def serialize(self): """Returns a dictionary of the data documents.""" return self._docs class Participants(object): """Class modelling a set of participants in pythonic way.""" #: Designates full access (read/write) role. ROLE_FULL = "FULL" #: Designates read-only role. 
ROLE_READ_ONLY = "READ_ONLY" def __init__(self, participants, roles, wave_id, wavelet_id, operation_queue): self._participants = set(participants) self._roles = roles.copy() self._wave_id = wave_id self._wavelet_id = wavelet_id self._operation_queue = operation_queue def __contains__(self, participant): return participant in self._participants def __len__(self): return len(self._participants) def __iter__(self): return self._participants.__iter__() def add(self, participant_id): """Adds a participant by their ID (address).""" self._operation_queue.wavelet_add_participant( self._wave_id, self._wavelet_id, participant_id) self._participants.add(participant_id) def get_role(self, participant_id): """Return the role for the given participant_id.""" return self._roles.get(participant_id, Participants.ROLE_FULL) def set_role(self, participant_id, role): """Sets the role for the given participant_id.""" if role != Participants.ROLE_FULL and role != Participants.ROLE_READ_ONLY: raise ValueError(role + ' is not a valid role') self._operation_queue.wavelet_modify_participant_role( self._wave_id, self._wavelet_id, participant_id, role) self._roles[participant_id] = role def serialize(self): """Returns a list of the participants.""" return list(self._participants) class Tags(object): """Class modelling a list of tags.""" def __init__(self, tags, wave_id, wavelet_id, operation_queue): self._tags = list(tags) self._wave_id = wave_id self._wavelet_id = wavelet_id self._operation_queue = operation_queue def __getitem__(self, index): return self._tags[index] def __len__(self): return len(self._tags) def __iter__(self): return self._tags.__iter__() def append(self, tag): """Appends a tag if it doesn't already exist.""" tag = util.force_unicode(tag) if tag in self._tags: return self._operation_queue.wavelet_modify_tag( self._wave_id, self._wavelet_id, tag) self._tags.append(tag) def remove(self, tag): """Removes a tag if it exists.""" tag = util.force_unicode(tag) if not tag in 
self._tags: return self._operation_queue.wavelet_modify_tag( self._wave_id, self._wavelet_id, tag, modify_how='remove') self._tags.remove(tag) def serialize(self): """Returns a list of tags.""" return list(self._tags) class Wavelet(object): """Models a single wavelet. A single wavelet is composed of metadata, participants, and its blips. To guarantee that all blips are available, specify Context.ALL for events. """ def __init__(self, json, blips, robot, operation_queue): """Inits this wavelet with JSON data. Args: json: JSON data dictionary from Wave server. blips: a dictionary object that can be used to resolve blips. robot: the robot owning this wavelet. operation_queue: an OperationQueue object to be used to send any generated operations to. """ self._robot = robot self._operation_queue = operation_queue self._wave_id = json.get('waveId') self._wavelet_id = json.get('waveletId') self._creator = json.get('creator') self._creation_time = json.get('creationTime', 0) self._data_documents = DataDocs(json.get('dataDocuments', {}), self._wave_id, self._wavelet_id, operation_queue) self._last_modified_time = json.get('lastModifiedTime') self._participants = Participants(json.get('participants', []), json.get('participantRoles', {}), self._wave_id, self._wavelet_id, operation_queue) self._title = json.get('title', '') self._tags = Tags(json.get('tags', []), self._wave_id, self._wavelet_id, operation_queue) self._raw_data = json self._blips = blip.Blips(blips) self._root_blip_id = json.get('rootBlipId') if self._root_blip_id and self._root_blip_id in self._blips: self._root_blip = self._blips[self._root_blip_id] else: self._root_blip = None self._robot_address = None @property def wavelet_id(self): """Returns this wavelet's id.""" return self._wavelet_id @property def wave_id(self): """Returns this wavelet's parent wave id.""" return self._wave_id @property def creator(self): """Returns the participant id of the creator of this wavelet.""" return self._creator @property 
def creation_time(self): """Returns the time that this wavelet was first created in milliseconds.""" return self._creation_time @property def data_documents(self): """Returns the data documents for this wavelet based on key name.""" return self._data_documents @property def domain(self): """Return the domain that wavelet belongs to.""" p = self._wave_id.find('!') if p == -1: return None else: return self._wave_id[:p] @property def last_modified_time(self): """Returns the time that this wavelet was last modified in ms.""" return self._last_modified_time @property def participants(self): """Returns a set of participants on this wavelet.""" return self._participants @property def tags(self): """Returns a list of tags for this wavelet.""" return self._tags @property def robot(self): """The robot that owns this wavelet.""" return self._robot def _get_title(self): return self._title def _set_title(self, title): title = util.force_unicode(title) if title.find('\n') != -1: raise errors.Error('Wavelet title should not contain a newline ' + 'character. Specified: ' + title) self._operation_queue.wavelet_set_title(self.wave_id, self.wavelet_id, title) self._title = title # Adjust the content of the root blip, if it is available in the context. if self._root_blip: content = '\n' splits = self._root_blip._content.split('\n', 2) if len(splits) == 3: content += splits[2] self._root_blip._content = '\n' + title + content #: Returns or sets the wavelet's title. 
title = property(_get_title, _set_title, doc='Get or set the title of the wavelet.') def _get_robot_address(self): return self._robot_address def _set_robot_address(self, address): if self._robot_address: raise errors.Error('robot address already set') self._robot_address = address robot_address = property(_get_robot_address, _set_robot_address, doc='Get or set the address of the current robot.') @property def root_blip(self): """Returns this wavelet's root blip.""" return self._root_blip @property def blips(self): """Returns the blips for this wavelet.""" return self._blips def get_operation_queue(self): """Returns the OperationQueue for this wavelet.""" return self._operation_queue def serialize(self): """Return a dict of the wavelet properties.""" return {'waveId': self._wave_id, 'waveletId': self._wavelet_id, 'creator': self._creator, 'creationTime': self._creation_time, 'dataDocuments': self._data_documents.serialize(), 'lastModifiedTime': self._last_modified_time, 'participants': self._participants.serialize(), 'title': self._title, 'blips': self._blips.serialize(), 'rootBlipId': self._root_blip_id } def proxy_for(self, proxy_for_id): """Return a view on this wavelet that will proxy for the specified id. A shallow copy of the current wavelet is returned with the proxy_for_id set. Any modifications made to this copy will be done using the proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will be used. If the wavelet was retrieved using the Active Robot API, that is by fetch_wavelet, then the address of the robot must be added to the wavelet by setting wavelet.robot_address before calling proxy_for(). 
""" self.add_proxying_participant(proxy_for_id) operation_queue = self.get_operation_queue().proxy_for(proxy_for_id) res = Wavelet(json={}, blips={}, robot=self.robot, operation_queue=operation_queue) res._wave_id = self._wave_id res._wavelet_id = self._wavelet_id res._creator = self._creator res._creation_time = self._creation_time res._data_documents = self._data_documents res._last_modified_time = self._last_modified_time res._participants = self._participants res._title = self._title res._raw_data = self._raw_data res._blips = self._blips res._root_blip = self._root_blip return res def add_proxying_participant(self, id): """Ads a proxying participant to the wave. Proxying participants are of the form robot+proxy@domain.com. This convenience method constructs this id and then calls participants.add. """ if not self.robot_address: raise errors.Error( 'Need a robot address to add a proxying for participant') robotid, domain = self.robot_address.split('@', 1) if '#' in robotid: robotid, version = robotid.split('#') else: version = None if '+' in robotid: newid = robotid.split('+', 1)[0] + '+' + id else: newid = robotid + '+' + id if version: newid += '#' + version newid += '@' + domain self.participants.add(newid) def submit_with(self, other_wavelet): """Submit this wavelet when the passed other wavelet is submited. wavelets constructed outside of the event callback need to be either explicitly submited using robot.submit(wavelet) or be associated with a different wavelet that will be submited or is part of the event callback. """ other_wavelet._operation_queue.copy_operations(self._operation_queue) self._operation_queue = other_wavelet._operation_queue def reply(self, initial_content=None): """Replies to the conversation in this wavelet. Args: initial_content: If set, start with this (string) content. Returns: A transient version of the blip that contains the reply. 
""" if not initial_content: initial_content = u'\n' initial_content = util.force_unicode(initial_content) blip_data = self._operation_queue.wavelet_append_blip( self.wave_id, self.wavelet_id, initial_content) instance = blip.Blip(blip_data, self._blips, self._operation_queue) self._blips._add(instance) return instance def delete(self, todelete): """Remove a blip from this wavelet. Args: todelete: either a blip or a blip id to be removed. """ if isinstance(todelete, blip.Blip): blip_id = todelete.blip_id else: blip_id = todelete self._operation_queue.blip_delete(self.wave_id, self.wavelet_id, blip_id) self._blips._remove_with_id(blip_id)
[ [ 8, 0, 0.0407, 0.0024, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0455, 0.0024, 0, 0.66, 0.1429, 134, 0, 1, 0, 0, 134, 0, 0 ], [ 1, 0, 0.0478, 0.0024, 0, 0.66...
[ "\"\"\"Defines classes that are needed to model a wavelet.\"\"\"", "import blip", "import errors", "import util", "class DataDocs(object):\n \"\"\"Class modeling a bunch of data documents in pythonic way.\"\"\"\n\n def __init__(self, init_docs, wave_id, wavelet_id, operation_queue):\n self._docs = init...
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.

"""Tests for google3.walkabout.externalagents.api.commandline_robot_runner."""

__author__ = 'douwe@google.com (Douwe Osinga)'

import StringIO

from google3.pyglib import app
from google3.pyglib import flags
from google3.testing.pybase import googletest
from google3.walkabout.externalagents.api import commandline_robot_runner
from google3.walkabout.externalagents.api import events

FLAGS = flags.FLAGS

# Canned wire-format JSON for one blip, its containing wavelet and a single
# WAVELET_PARTICIPANTS_CHANGED event, mimicking a server-posted event bundle.
BLIP_JSON = ('{"wdykLROk*13":'
             '{"lastModifiedTime":1242079608457,'
             '"contributors":["someguy@test.com"],'
             '"waveletId":"test.com!conv+root",'
             '"waveId":"test.com!wdykLROk*11",'
             '"parentBlipId":null,'
             '"version":3,'
             '"creator":"someguy@test.com",'
             '"content":"\\nContent!",'
             '"blipId":"wdykLROk*13",'
             '"annotations":[{"range":{"start":0,"end":1},'
             '"name":"user/e/otherguy@test.com","value":"Other"}],'
             '"elements":{},'
             '"childBlipIds":[]}'
             '}')

WAVELET_JSON = ('{"lastModifiedTime":1242079611003,'
                '"title":"A title",'
                '"waveletId":"test.com!conv+root",'
                '"rootBlipId":"wdykLROk*13",'
                '"dataDocuments":null,'
                '"creationTime":1242079608457,'
                '"waveId":"test.com!wdykLROk*11",'
                '"participants":["someguy@test.com","monty@appspot.com"],'
                '"creator":"someguy@test.com",'
                '"version":5}')

EVENTS_JSON = ('[{"timestamp":1242079611003,'
               '"modifiedBy":"someguy@test.com",'
               '"properties":{"participantsRemoved":[],'
               '"participantsAdded":["monty@appspot.com"]},'
               '"type":"WAVELET_PARTICIPANTS_CHANGED"}]')

TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % (
    BLIP_JSON, WAVELET_JSON, EVENTS_JSON)


class CommandlineRobotRunnerTest(googletest.TestCase):
  """End-to-end test of the command line robot runner."""

  def testSimpleFlow(self):
    # Register an event definition for the participants-changed event that
    # retitles the wavelet, then feed the canned bundle through run_bot.
    FLAGS.eventdef_wavelet_participants_changed = 'x'
    flag_name = 'eventdef_' + events.WaveletParticipantsChanged.type.lower()
    setattr(FLAGS, flag_name, 'w.title="New title!"')
    in_stream = StringIO.StringIO(TEST_JSON)
    out_stream = StringIO.StringIO()
    commandline_robot_runner.run_bot(in_stream, out_stream)
    # The serialized response must carry the resulting setTitle operation.
    output = out_stream.getvalue()
    self.assertTrue('wavelet.setTitle' in output)


def main(unused_argv):
  googletest.main()


if __name__ == '__main__':
  app.run()
[ [ 8, 0, 0.0658, 0.0132, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.0921, 0.0132, 0, 0.66, 0.0667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.1184, 0.0132, 0, 0.66,...
[ "\"\"\"Tests for google3.walkabout.externalagents.api.commandline_robot_runner.\"\"\"", "__author__ = 'douwe@google.com (Douwe Osinga)'", "import StringIO", "from google3.pyglib import app", "from google3.pyglib import flags", "from google3.testing.pybase import googletest", "from google3.walkabout.exte...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Script to run all unit tests in this package."""

import blip_test
import element_test
import module_test_runner
import ops_test
import robot_test
import util_test
import wavelet_test

# Test modules executed by RunUnitTests, in this order.
_TEST_MODULES = [
    blip_test,
    element_test,
    ops_test,
    robot_test,
    util_test,
    wavelet_test,
]


def RunUnitTests():
  """Runs all registered unit tests."""
  runner = module_test_runner.ModuleTestRunner()
  runner.modules = list(_TEST_MODULES)
  runner.RunAllTests()


if __name__ == "__main__":
  RunUnitTests()
[ [ 8, 0, 0.3864, 0.0227, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.4545, 0.0227, 0, 0.66, 0.1111, 740, 0, 1, 0, 0, 740, 0, 0 ], [ 1, 0, 0.4773, 0.0227, 0, 0.66...
[ "\"\"\"Script to run all unit tests in this package.\"\"\"", "import blip_test", "import element_test", "import module_test_runner", "import ops_test", "import robot_test", "import util_test", "import wavelet_test", "def RunUnitTests():\n \"\"\"Runs all registered unit tests.\"\"\"\n test_runner =...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the util module."""

__author__ = 'davidbyttow@google.com (David Byttow)'

import unittest

import ops
import util


class TestUtils(unittest.TestCase):
  """Tests utility functions."""

  def testIsIterable(self):
    # Containers count as iterable; scalars, strings and classes do not.
    self.assertTrue(util.is_iterable([]))
    self.assertTrue(util.is_iterable({}))
    self.assertTrue(util.is_iterable(set()))
    self.assertTrue(util.is_iterable(()))
    self.assertFalse(util.is_iterable(42))
    self.assertFalse(util.is_iterable('list?'))
    self.assertFalse(util.is_iterable(object))

  def testIsDict(self):
    # Only a real dict qualifies; other containers are rejected.
    self.assertFalse(util.is_dict([]))
    self.assertTrue(util.is_dict({}))
    self.assertFalse(util.is_dict(set()))
    self.assertFalse(util.is_dict(()))
    self.assertFalse(util.is_dict(42))
    self.assertFalse(util.is_dict('dict?'))
    self.assertFalse(util.is_dict(object))

  def testIsUserDefinedNewStyleClass(self):
    # Python 2 distinction: classic class vs. new-style (object-derived).
    class OldClass:
      pass

    class NewClass(object):
      pass

    self.assertFalse(util.is_user_defined_new_style_class(OldClass()))
    self.assertTrue(util.is_user_defined_new_style_class(NewClass()))
    self.assertFalse(util.is_user_defined_new_style_class({}))
    self.assertFalse(util.is_user_defined_new_style_class(()))
    self.assertFalse(util.is_user_defined_new_style_class(42))
    self.assertFalse(util.is_user_defined_new_style_class('instance?'))

  def testLowerCamelCase(self):
    # snake_case -> lowerCamelCase conversion, including degenerate inputs
    # (already camel, repeated/leading/trailing underscores, empty string).
    self.assertEquals('foo', util.lower_camel_case('foo'))
    self.assertEquals('fooBar', util.lower_camel_case('foo_bar'))
    self.assertEquals('fooBar', util.lower_camel_case('fooBar'))
    self.assertEquals('blipId', util.lower_camel_case('blip_id'))
    self.assertEquals('fooBar', util.lower_camel_case('foo__bar'))
    self.assertEquals('fooBarBaz', util.lower_camel_case('foo_bar_baz'))
    self.assertEquals('f', util.lower_camel_case('f'))
    self.assertEquals('f', util.lower_camel_case('f_'))
    self.assertEquals('', util.lower_camel_case(''))
    self.assertEquals('', util.lower_camel_case('_'))
    self.assertEquals('aBCDEF', util.lower_camel_case('_a_b_c_d_e_f_'))

  def assertListsEqual(self, a, b):
    # Helper: element-wise list comparison.
    self.assertEquals(len(a), len(b))
    for i in range(len(a)):
      self.assertEquals(a[i], b[i])

  def assertDictsEqual(self, a, b):
    # Helper: key/value dict comparison (Python 2 iteritems).
    self.assertEquals(len(a.keys()), len(b.keys()))
    for k, v in a.iteritems():
      self.assertEquals(v, b[k])

  def testSerializeList(self):
    data = [1, 2, 3]
    output = util.serialize(data)
    self.assertListsEqual(data, output)

  def testSerializeDict(self):
    # Dict keys are camel-cased during serialization.
    data = {'key': 'value', 'under_score': 'value2'}
    expected = {'key': 'value', 'underScore': 'value2'}
    output = util.serialize(data)
    self.assertDictsEqual(expected, output)

  def testNonNoneDict(self):
    # None-valued entries are stripped; everything else is preserved.
    a = {'a': 1, 'b': 1}
    self.assertDictsEqual(a, util.non_none_dict(a))
    b = a.copy()
    b['c'] = None
    self.assertDictsEqual(a, util.non_none_dict(b))

  def testForceUnicode(self):
    # Accepts str, int and unicode; UTF-8 byte strings are decoded.
    self.assertEquals(u"aaa", util.force_unicode("aaa"))
    self.assertEquals(u"12", util.force_unicode(12))
    self.assertEquals(u"\u0430\u0431\u0432",
                      util.force_unicode("\xd0\xb0\xd0\xb1\xd0\xb2"))
    self.assertEquals(u'\u30e6\u30cb\u30b3\u30fc\u30c9',
                      util.force_unicode(u'\u30e6\u30cb\u30b3\u30fc\u30c9'))

  def testSerializeAttributes(self):
    class Data(object):
      def __init__(self):
        self.public = 1
        self._protected = 2
        self.__private = 3

      def Func(self):
        pass

    data = Data()
    output = util.serialize(data)
    # Functions and non-public fields should not be serialized.
    self.assertEquals(1, len(output.keys()))
    self.assertEquals(data.public, output['public'])

  def testStringEnum(self):
    # Each argument becomes an attribute whose value equals its name.
    util.StringEnum()
    single = util.StringEnum('foo')
    self.assertEquals('foo', single.foo)
    multi = util.StringEnum('foo', 'bar')
    self.assertEquals('foo', multi.foo)
    self.assertEquals('bar', multi.bar)

  def testParseMarkup(self):
    # Markup is flattened to plain text; <br> and <p ...> become newlines.
    self.assertEquals('foo', util.parse_markup('foo'))
    self.assertEquals('foo bar', util.parse_markup('foo <b>bar</b>'))
    self.assertEquals('foo\nbar', util.parse_markup('foo<br>bar'))
    self.assertEquals('foo\nbar', util.parse_markup('foo<p indent="3">bar'))


if __name__ == '__main__':
  unittest.main()
[ [ 8, 0, 0.1172, 0.0069, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1379, 0.0069, 0, 0.66, 0.1667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.1586, 0.0069, 0, 0.66,...
[ "\"\"\"Unit tests for the util module.\"\"\"", "__author__ = 'davidbyttow@google.com (David Byttow)'", "import unittest", "import ops", "import util", "class TestUtils(unittest.TestCase):\n \"\"\"Tests utility functions.\"\"\"\n\n def testIsIterable(self):\n self.assertTrue(util.is_iterable([]))\n ...
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc. All Rights Reserved.

"""Run robot from the commandline for testing.

This robot_runner lets you define event handlers using flags and takes the
json input from the std in and writes out the json output to stdout.

for example
  cat events | commandline_robot_runner.py \
    --eventdef-blip_submitted="wavelet.title='title'"
"""

__author__ = 'douwe@google.com (Douwe Osinga)'

import sys
import urllib

from google3.pyglib import app
from google3.pyglib import flags

from google3.walkabout.externalagents import api
from google3.walkabout.externalagents.api import blip
from google3.walkabout.externalagents.api import element
from google3.walkabout.externalagents.api import errors
from google3.walkabout.externalagents.api import events
from google3.walkabout.externalagents.api import ops
from google3.walkabout.externalagents.api import robot
from google3.walkabout.externalagents.api import util

FLAGS = flags.FLAGS

# One string flag per known event type, e.g. --eventdef_blip_submitted.
# The flag value is Python source executed when that event fires.
for event in events.ALL:
  flags.DEFINE_string('eventdef_' + event.type.lower(), '',
                      'Event definition for the %s event' % event.type)


def handle_event(src, bot, e, w):
  """Handle an event by executing the source code src.

  Args:
    src: Python source to execute (taken from an --eventdef_* flag).
    bot: the robot instance, exposed to the snippet as 'bot'.
    e: the triggering event, exposed as 'e'.
    w: the wavelet the event applies to, exposed as 'w'.
  """
  # The snippet gets the event, the wavelet, the bot and the API modules
  # as its global namespace.
  globs = {'e': e, 'w': w, 'api': api, 'bot': bot, 'blip': blip,
           'element': element, 'errors': errors, 'events': events,
           'ops': ops, 'robot': robot, 'util': util}
  # exec(src, globs) is equivalent to the Python 2 statement
  # "exec src in globs" but is also valid Python 3 syntax.
  exec(src, globs)


def run_bot(input_file, output_file):
  """Run a robot defined on the command line.

  Reads a UTF-8 JSON event bundle from input_file, dispatches each event
  to the handlers defined via --eventdef_* flags, and writes the JSON
  operation bundle produced by the robot to output_file.
  """
  cmdbot = robot.Robot('Commandline bot')
  for event in events.ALL:
    src = getattr(FLAGS, 'eventdef_' + event.type.lower())
    src = urllib.unquote_plus(src)
    if src:
      # src and bot are bound as lambda defaults so each handler captures
      # the values from THIS iteration, not the loop's final values.
      cmdbot.register_handler(event,
                              lambda event, wavelet, src=src, bot=cmdbot:
                              handle_event(src, bot, event, wavelet))
  json_body = unicode(input_file.read(), 'utf8')
  json_response = cmdbot.process_events(json_body)
  output_file.write(json_response)


def main(argv):
  run_bot(sys.stdin, sys.stdout)


if __name__ == '__main__':
  app.run()
[ [ 8, 0, 0.1304, 0.1304, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2174, 0.0145, 0, 0.66, 0.0526, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.2464, 0.0145, 0, 0.66,...
[ "\"\"\"Run robot from the commandline for testing.\n\nThis robot_runner let's you define event handlers using flags and takes the\njson input from the std in and writes out the json output to stdout.\n\nfor example\n cat events | commandline_robot_runner.py \\\n --eventdef-blip_submitted=\"wavelet.title='titl...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Support for operations that can be applied to the server.

Contains classes and utilities for creating operations that are to be
applied on the server.
"""

import errors
import random
import util
import sys

PROTOCOL_VERSION = '0.21'

# Operation Types: wire-format method names understood by the server.
WAVELET_APPEND_BLIP = 'wavelet.appendBlip'
WAVELET_SET_TITLE = 'wavelet.setTitle'
WAVELET_ADD_PARTICIPANT = 'wavelet.participant.add'
WAVELET_DATADOC_SET = 'wavelet.datadoc.set'
WAVELET_MODIFY_TAG = 'wavelet.modifyTag'
WAVELET_MODIFY_PARTICIPANT_ROLE = 'wavelet.modifyParticipantRole'
BLIP_CREATE_CHILD = 'blip.createChild'
BLIP_DELETE = 'blip.delete'
DOCUMENT_APPEND_MARKUP = 'document.appendMarkup'
DOCUMENT_INLINE_BLIP_INSERT = 'document.inlineBlip.insert'
DOCUMENT_MODIFY = 'document.modify'
ROBOT_CREATE_WAVELET = 'robot.createWavelet'
ROBOT_FETCH_WAVE = 'robot.fetchWave'
ROBOT_NOTIFY_CAPABILITIES_HASH = 'robot.notifyCapabilitiesHash'


class Operation(object):
  """Represents a generic operation applied on the server.

  This operation class contains data that is filled in depending on the
  operation type.

  It can be used directly, but doing so will not result
  in local, transient reflection of state on the blips. In other words,
  creating a 'delete blip' operation will not remove the blip from the local
  context for the duration of this session. It is better to use the OpBased
  model classes directly instead.
  """

  def __init__(self, method, opid, params):
    """Initializes this operation with contextual data.

    Args:
      method: Method to call or type of operation.
      opid: The id of the operation. Any callbacks will refer to these.
      params: An operation type dependent dictionary.
    """
    self.method = method
    self.id = opid
    self.params = params

  def __str__(self):
    return '%s[%s]%s' % (self.method, self.id, str(self.params))

  def set_param(self, param, value):
    """Sets a parameter on this operation; returns self for chaining."""
    self.params[param] = value
    return self

  def serialize(self, method_prefix=''):
    """Serialize the operation.

    Args:
      method_prefix: prefix added to each method name to allow for
        specifying a namespace.

    Returns:
      a dict representation of the operation.
    """
    if method_prefix and not method_prefix.endswith('.'):
      method_prefix += '.'
    # util.serialize camel-cases param keys for the wire format.
    return {'method': method_prefix + self.method,
            'id': self.id,
            'params': util.serialize(self.params)}

  def set_optional(self, param, value):
    """Sets an optional parameter.

    If value is None or "", this is a no op. Otherwise it calls set_param.
    """
    if value == '' or value is None:
      return self
    else:
      return self.set_param(param, value)


class OperationQueue(object):
  """Wraps the queuing of operations using easily callable functions.

  The operation queue wraps single operations as functions and queues the
  resulting operations in-order. Typically there shouldn't be a need to
  call this directly unless operations are needed on entities outside
  of the scope of the robot. For example, to modify a blip that
  does not exist in the current context, you might specify the wave, wavelet
  and blip id to generate an operation.

  Any calls to this will not be reflected in the robot in any way.
  For example, calling wavelet_append_blip will not result in a new blip
  being added to the robot, only an operation to be applied on the
  server.
  """

  # Some class global counters:
  # shared by every queue instance so each queued operation gets a
  # process-unique 'op<N>' id.
  _next_operation_id = 1

  def __init__(self, proxy_for_id=None):
    self.__pending = []          # ordered list of queued Operation objects
    self._capability_hash = 0
    self._proxy_for_id = proxy_for_id

  def _new_blipdata(self, wave_id, wavelet_id, initial_content='',
                    parent_blip_id=None):
    """Creates JSON of the blip used for this session."""
    # 'TBD_' ids are placeholders the server replaces on application.
    # NOTE(review): sys.maxint is Python 2-only (sys.maxsize in Python 3).
    temp_blip_id = 'TBD_%s_%s' % (wavelet_id,
                                  hex(random.randint(0, sys.maxint)))
    return {'waveId': wave_id,
            'waveletId': wavelet_id,
            'blipId': temp_blip_id,
            'content': initial_content,
            'parentBlipId': parent_blip_id}

  def _new_waveletdata(self, domain, participants):
    """Creates an ephemeral WaveletData instance used for this session.

    Args:
      domain: the domain to create the data for.
      participants: participants initially on the wavelet.

    Returns:
      Blipdata (for the rootblip), WaveletData.
    """
    wave_id = domain + '!TBD_%s' % hex(random.randint(0, sys.maxint))
    wavelet_id = domain + '!conv+root'
    root_blip_data = self._new_blipdata(wave_id, wavelet_id)
    participants = set(participants)
    wavelet_data = {'waveId': wave_id,
                    'waveletId': wavelet_id,
                    'rootBlipId': root_blip_data['blipId'],
                    'participants': participants}
    return root_blip_data, wavelet_data

  def __len__(self):
    return len(self.__pending)

  def __iter__(self):
    return self.__pending.__iter__()

  def clear(self):
    """Drops all pending operations."""
    self.__pending = []

  def proxy_for(self, proxy):
    """Return a view of this operation queue with the proxying for set to proxy.

    This method returns a new instance of an operation queue that shares the
    operation list, but has a different proxying_for_id set so the robot
    using this new queue will send out operations with the proxying_for
    field set.
    """
    # The pending list is shared (not copied): ops queued through the proxy
    # view end up in the same outgoing bundle.
    res = OperationQueue()
    res.__pending = self.__pending
    res._capability_hash = self._capability_hash
    res._proxy_for_id = proxy
    return res

  def set_capability_hash(self, capability_hash):
    """Records the capabilities hash to send with the next bundle."""
    self._capability_hash = capability_hash

  def serialize(self):
    """Serializes the queue; a notifyCapabilitiesHash op is always first."""
    first = Operation(ROBOT_NOTIFY_CAPABILITIES_HASH, '0',
                      {'capabilitiesHash': self._capability_hash,
                       'protocolVersion': PROTOCOL_VERSION})
    operations = [first] + self.__pending
    res = util.serialize(operations)
    return res

  def copy_operations(self, other_queue):
    """Copy the pending operations from other_queue into this one."""
    for op in other_queue:
      self.__pending.append(op)

  def new_operation(self, method, wave_id, wavelet_id, props=None, **kwprops):
    """Creates and adds a new operation to the operation list."""
    if props is None:
      props = {}
    props.update(kwprops)
    props['waveId'] = wave_id
    props['waveletId'] = wavelet_id
    if self._proxy_for_id:
      props['proxyingFor'] = self._proxy_for_id
    operation = Operation(method,
                          'op%s' % OperationQueue._next_operation_id,
                          props)
    self.__pending.append(operation)
    OperationQueue._next_operation_id += 1
    return operation

  def wavelet_append_blip(self, wave_id, wavelet_id, initial_content=''):
    """Appends a blip to a wavelet.

    Args:
      wave_id: The wave id owning the containing wavelet.
      wavelet_id: The wavelet id that this blip should be appended to.
      initial_content: optionally the content to start with.

    Returns:
      JSON representing the information of the new blip.
    """
    blip_data = self._new_blipdata(wave_id, wavelet_id, initial_content)
    self.new_operation(WAVELET_APPEND_BLIP, wave_id,
                       wavelet_id, blipData=blip_data)
    return blip_data

  def wavelet_add_participant(self, wave_id, wavelet_id, participant_id):
    """Adds a participant to a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to add.

    Returns:
      data for the root_blip, wavelet
    """
    return self.new_operation(WAVELET_ADD_PARTICIPANT, wave_id, wavelet_id,
                              participantId=participant_id)

  def wavelet_datadoc_set(self, wave_id, wavelet_id, name, data):
    """Sets a key/value pair on the data document of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      name: The key name for this data.
      data: The value of the data to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_DATADOC_SET, wave_id, wavelet_id,
                              datadocName=name, datadocValue=data)

  def robot_create_wavelet(self, domain, participants=None, message=''):
    """Creates a new wavelet.

    Args:
      domain: the domain to create the wave in.
      participants: initial participants on this wavelet or None if none.
      message: an optional payload that is returned with the corresponding
        event.

    Returns:
      data for the root_blip, wavelet
    """
    if participants is None:
      participants = []
    blip_data, wavelet_data = self._new_waveletdata(domain, participants)
    op = self.new_operation(ROBOT_CREATE_WAVELET,
                            wave_id=wavelet_data['waveId'],
                            wavelet_id=wavelet_data['waveletId'],
                            waveletData=wavelet_data)
    op.set_optional('message', message)
    return blip_data, wavelet_data

  def robot_fetch_wave(self, wave_id, wavelet_id):
    """Requests a snapshot of the specified wave.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(ROBOT_FETCH_WAVE, wave_id, wavelet_id)

  def wavelet_set_title(self, wave_id, wavelet_id, title):
    """Sets the title of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      title: The title to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_SET_TITLE, wave_id, wavelet_id,
                              waveletTitle=title)

  def wavelet_modify_participant_role(
      self, wave_id, wavelet_id, participant_id, role):
    """Modify the role of a participant on a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to add.
      role: the new role.

    Returns:
      data for the root_blip, wavelet
    """
    return self.new_operation(WAVELET_MODIFY_PARTICIPANT_ROLE, wave_id,
                              wavelet_id, participantId=participant_id,
                              participantRole=role)

  def wavelet_modify_tag(self, wave_id, wavelet_id, tag, modify_how=None):
    """Modifies a tag in a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      tag: The tag (a string).
      modify_how: (optional) how to apply the tag. The default is to add
        the tag. Specify 'remove' to remove. Specify None or 'add' to add.

    Returns:
      The operation created.
    """
    # The modify_how key is camel-cased to modifyHow by util.serialize.
    return self.new_operation(WAVELET_MODIFY_TAG, wave_id, wavelet_id,
                              name=tag).set_optional("modify_how", modify_how)

  def blip_create_child(self, wave_id, wavelet_id, blip_id):
    """Creates a child blip of another blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      JSON of blip for which further operations can be applied.
    """
    blip_data = self._new_blipdata(wave_id, wavelet_id,
                                   parent_blip_id=blip_id)
    self.new_operation(BLIP_CREATE_CHILD, wave_id, wavelet_id,
                       blipId=blip_id, blipData=blip_data)
    return blip_data

  def blip_delete(self, wave_id, wavelet_id, blip_id):
    """Deletes the specified blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(BLIP_DELETE, wave_id, wavelet_id,
                              blipId=blip_id)

  def document_append_markup(self, wave_id, wavelet_id, blip_id, content):
    """Appends content with markup to a document.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.
      content: The markup content to append.

    Returns:
      The operation created.
    """
    return self.new_operation(DOCUMENT_APPEND_MARKUP, wave_id, wavelet_id,
                              blipId=blip_id, content=content)

  def document_modify(self, wave_id, wavelet_id, blip_id):
    """Creates and queues a document modify operation.

    The returned operation still needs to be filled with details before
    it makes sense.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(DOCUMENT_MODIFY, wave_id, wavelet_id,
                              blipId=blip_id)

  def document_inline_blip_insert(self, wave_id, wavelet_id, blip_id,
                                  position):
    """Inserts an inline blip at a specific location.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.
      position: The position in the document to insert the blip.

    Returns:
      JSON data for the blip that was created for further operations.
    """
    inline_blip_data = self._new_blipdata(wave_id, wavelet_id)
    inline_blip_data['parentBlipId'] = blip_id
    self.new_operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,
                       blipId=blip_id, index=position,
                       blipData=inline_blip_data)
    return inline_blip_data
[ [ 8, 0, 0.0453, 0.0119, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0549, 0.0024, 0, 0.66, 0.0476, 841, 0, 1, 0, 0, 841, 0, 0 ], [ 1, 0, 0.0573, 0.0024, 0, 0.66...
[ "\"\"\"Support for operations that can be applied to the server.\n\nContains classes and utilities for creating operations that are to be\napplied on the server.\n\"\"\"", "import errors", "import random", "import util", "import sys", "PROTOCOL_VERSION = '0.21'", "WAVELET_APPEND_BLIP = 'wavelet.appendBl...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A module to run wave robots on app engine."""

import logging
import sys

import events

from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app


class CapabilitiesHandler(webapp.RequestHandler):
  """Handler to forward a request to a handler of a robot."""

  def __init__(self, method, contenttype):
    """Initializes this handler with a specific robot.

    Args:
      method: Zero-argument callable producing the response body.
      contenttype: Value for the response Content-Type header.
    """
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET request."""
    self.response.headers['Content-Type'] = self._contenttype
    self.response.out.write(self._method())


class ProfileHandler(webapp.RequestHandler):
  """Handler to forward a request to a handler of a robot."""

  def __init__(self, method, contenttype):
    """Initializes this handler with a specific robot.

    Args:
      method: Callable producing the profile body; accepts an optional
          name argument when a proxied profile is requested.
      contenttype: Value for the response Content-Type header.
    """
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET request."""
    self.response.headers['Content-Type'] = self._contenttype
    # Respond with proxied profile if name specified
    if self.request.get('name'):
      self.response.out.write(self._method(self.request.get('name')))
    else:
      self.response.out.write(self._method())


class RobotEventHandler(webapp.RequestHandler):
  """Handler for the dispatching of events to various handlers to a robot.

  This handler only responds to post events with a JSON post body. Its
  primary task is to separate out the context data from the events in the
  post body and dispatch all events in order. Once all events have been
  dispatched it serializes the context data and its associated operations
  as a response.
  """

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles the get event for debugging.

    This is useful for debugging but since event bundles tend to be
    rather big it often won't fit for more complex requests.
    """
    ops = self.request.get('events')
    if ops:
      # Fixed: previously the imported `events` module was assigned here
      # instead of the fetched query value, breaking GET-based debugging.
      self.request.body = ops
      self.post()

  def post(self):
    """Handles HTTP POST requests."""
    json_body = self.request.body
    if not json_body:
      # TODO(davidbyttow): Log error?
      return

    # Redirect stdout to stderr while executing handlers. This way, any stray
    # "print" statements in bot code go to the error logs instead of breaking
    # the JSON response sent to the HTTP channel.
    saved_stdout, sys.stdout = sys.stdout, sys.stderr
    try:
      json_body = unicode(json_body, 'utf8')
      logging.info('Incoming: %s', json_body)

      json_response = self._robot.process_events(json_body)

      logging.info('Outgoing: %s', json_response)
    finally:
      # Always restore stdout, even if a handler raised, so later requests
      # are not silently redirected to the error logs.
      sys.stdout = saved_stdout

    # Build the response.
    self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
    self.response.out.write(json_response.encode('utf-8'))


def operation_error_handler(event, wavelet):
  """Default operation error handler, logging what went wrong."""
  if isinstance(event, events.OperationError):
    logging.error('Previously operation failed: id=%s, message: %s',
                  event.operation_id,
                  event.error_message)


def appengine_post(url, data, headers):
  """Posts data to url with headers using app engine's urlfetch.

  Returns:
    Tuple of (HTTP status code, response content).
  """
  result = urlfetch.fetch(
      method='POST', url=url, payload=data, headers=headers, deadline=10)
  return result.status_code, result.content


class RobotVerifyTokenHandler(webapp.RequestHandler):
  """Handler for the token_verify request."""

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles the get event for debugging. Ops usually too long."""
    token, st = self._robot.get_verification_token_info()
    # Use lazy %s formatting so a None token is logged instead of raising
    # TypeError (the previous '+' concatenation crashed before the check).
    logging.info('token=%s', token)
    if token is None:
      self.error(404)
      self.response.out.write('No token set')
      return
    if st is not None:
      if self.request.get('st') != st:
        self.response.out.write('Invalid st value passed')
        return
    self.response.out.write(token)


def create_robot_webapp(robot, debug=False, extra_handlers=None):
  """Returns an instance of webapp.WSGIApplication with robot handlers."""
  if not extra_handlers:
    extra_handlers = []
  return webapp.WSGIApplication([('.*/_wave/capabilities.xml',
                                  lambda: CapabilitiesHandler(
                                      robot.capabilities_xml,
                                      'application/xml')),
                                 ('.*/_wave/robot/profile',
                                  lambda: ProfileHandler(
                                      robot.profile_json,
                                      'application/json')),
                                 ('.*/_wave/robot/jsonrpc',
                                  lambda: RobotEventHandler(robot)),
                                 ('.*/_wave/verify_token',
                                  lambda: RobotVerifyTokenHandler(robot)),
                                ] + extra_handlers,
                                debug=debug)


def run(robot, debug=False, log_errors=True, extra_handlers=None):
  """Sets up the webapp handlers for this robot and starts listening.

  A robot is typically setup in the following steps:
    1. Instantiate and define robot.
    2. Register various handlers that it is interested in.
    3. Call Run, which will setup the handlers for the app.

  For example:
    robot = Robot('Terminator',
                  image_url='http://www.sky.net/models/t800.png',
                  profile_url='http://www.sky.net/models/t800.html')
    robot.register_handler(WAVELET_PARTICIPANTS_CHANGED, KillParticipant)
    run(robot)

  Args:
    robot: the robot to run. This robot is modified to use app engines
        urlfetch for posting http.
    debug: Optional variable that defaults to False and is passed through
        to the webapp application to determine if it should show debug info.
    log_errors: Optional flag that defaults to True and determines whether
        a default handlers to catch errors should be setup that uses the
        app engine logging to log errors.
    extra_handlers: Optional list of tuples that are passed to the webapp
        to install more handlers. For example, passing
        [('/about', AboutHandler),] would install an extra about handler
        for the robot.
  """
  # App Engine expects to construct a class with no arguments, so we
  # pass a lambda that constructs the appropriate handler with
  # arguments from the enclosing scope.
  if log_errors:
    robot.register_handler(events.OperationError, operation_error_handler)
  robot.http_post = appengine_post
  app = create_robot_webapp(robot, debug, extra_handlers)
  run_wsgi_app(app)
[ [ 8, 0, 0.0846, 0.005, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0995, 0.005, 0, 0.66, 0.0714, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.1045, 0.005, 0, 0.66, ...
[ "\"\"\"A module to run wave robots on app engine.\"\"\"", "import logging", "import sys", "import events", "from google.appengine.api import urlfetch", "from google.appengine.ext import webapp", "from google.appengine.ext.webapp.util import run_wsgi_app", "class CapabilitiesHandler(webapp.RequestHandl...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the ops module."""

import unittest

import ops


class TestOperation(unittest.TestCase):
  """Test case for Operation class."""

  def testFields(self):
    # A freshly constructed operation exposes its method, id and params.
    operation = ops.Operation(ops.WAVELET_SET_TITLE, 'opid02',
                              {'waveId': 'wavelet-id',
                               'title': 'a title'})
    self.assertEqual(ops.WAVELET_SET_TITLE, operation.method)
    self.assertEqual('opid02', operation.id)
    self.assertEqual(2, len(operation.params))

  def testConstructModifyTag(self):
    # The optional modify_how argument adds a fourth parameter.
    queue = ops.OperationQueue()
    operation = queue.wavelet_modify_tag('waveid', 'waveletid', 'tag')
    self.assertEqual(3, len(operation.params))
    operation = queue.wavelet_modify_tag(
        'waveid', 'waveletid', 'tag', modify_how='remove')
    self.assertEqual(4, len(operation.params))

  def testConstructRobotFetchWave(self):
    # A queue created with a proxy id stamps proxyingFor on operations.
    queue = ops.OperationQueue('proxyid')
    operation = queue.robot_fetch_wave('wave1', 'wavelet1')
    self.assertEqual(3, len(operation.params))
    self.assertEqual('proxyid', operation.params['proxyingFor'])
    self.assertEqual('wave1', operation.params['waveId'])
    self.assertEqual('wavelet1', operation.params['waveletId'])


class TestOperationQueue(unittest.TestCase):
  """Test case for OperationQueue class."""

  def testSerialize(self):
    # Serializing yields the capabilities-hash notification followed by
    # the queued operation.
    queue = ops.OperationQueue()
    queue.set_capability_hash('hash')
    queue.wavelet_modify_tag('waveid', 'waveletid', 'tag')
    serialized = queue.serialize()
    self.assertEqual(2, len(serialized))
    self.assertEqual('robot.notifyCapabilitiesHash', serialized[0]['method'])
    self.assertEqual('hash', serialized[0]['params']['capabilitiesHash'])
    self.assertEqual(ops.PROTOCOL_VERSION,
                     serialized[0]['params']['protocolVersion'])
    self.assertEqual('wavelet.modifyTag', serialized[1]['method'])


if __name__ == '__main__':
  unittest.main()
[ [ 8, 0, 0.2537, 0.0149, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2985, 0.0149, 0, 0.66, 0.2, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.3284, 0.0149, 0, 0.66, ...
[ "\"\"\"Unit tests for the ops module.\"\"\"", "import unittest", "import ops", "class TestOperation(unittest.TestCase):\n \"\"\"Test case for Operation class.\"\"\"\n\n def testFields(self):\n op = ops.Operation(ops.WAVELET_SET_TITLE, 'opid02',\n {'waveId': 'wavelet-id',\n ...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility library containing various helpers used by the API."""

import re

# Name of the method an object may define to customize its serialization.
CUSTOM_SERIALIZE_METHOD_NAME = 'serialize'

# Matches a single markup tag, capturing its contents (non-greedy).
MARKUP_RE = re.compile(r'<([^>]*?)>')


def force_unicode(object):
  """ Return the Unicode string version of object, with UTF-8 encoding. """
  if isinstance(object, unicode):
    return object
  return unicode(str(object), 'utf-8')


def parse_markup(markup):
  """Parses a bit of markup into robot compatible text.

  For now this is a rough approximation: <p> and <br> become newlines,
  every other tag is dropped.
  """
  def replace_tag(group):
    # Fixed: the original tested `group.groups` (the bound method, which is
    # always truthy) instead of calling it, making the guard dead code.
    if not group.groups():
      return ''
    # Take the tag name, ignoring any attributes after the first space.
    tag = group.groups()[0].split(' ', 1)[0]
    if (tag == 'p' or tag == 'br'):
      return '\n'
    return ''

  return MARKUP_RE.sub(replace_tag, markup)


def is_iterable(inst):
  """Returns whether or not this is a list, tuple, set or dict .

  Note that this does not return true for strings.
  """
  return hasattr(inst, '__iter__')


def is_dict(inst):
  """Returns whether or not the specified instance is a dict."""
  return hasattr(inst, 'iteritems')


def is_user_defined_new_style_class(obj):
  """Returns whether or not the specified instance is a user-defined type."""
  return type(obj).__module__ != '__builtin__'


def lower_camel_case(s):
  """Converts a string to lower camel case.

  Examples:
    foo => foo
    foo_bar => fooBar
    foo__bar => fooBar
    foo_bar_baz => fooBarBaz

  Args:
    s: The string to convert to lower camel case.

  Returns:
    The lower camel cased string.
  """
  return reduce(lambda a, b: a + (a and b.capitalize() or b), s.split('_'))


def non_none_dict(d):
  """return a copy of the dictionary without none values."""
  return dict([a for a in d.items() if not a[1] is None])


def _serialize_attributes(obj):
  """Serializes attributes of an instance.

  Iterates all attributes of an object and invokes serialize if they are
  public and not callable.

  Args:
    obj: The instance to serialize.

  Returns:
    The serialized object.
  """
  data = {}
  for attr_name in dir(obj):
    if attr_name.startswith('_'):
      continue
    attr = getattr(obj, attr_name)
    if attr is None or callable(attr):
      continue
    # Looks okay, serialize it.
    data[lower_camel_case(attr_name)] = serialize(attr)
  return data


def _serialize_dict(d):
  """Invokes serialize on all of its key/value pairs.

  Args:
    d: The dict instance to serialize.

  Returns:
    The serialized dict.
  """
  data = {}
  for k, v in d.items():
    data[lower_camel_case(k)] = serialize(v)
  return data


def serialize(obj):
  """Serializes any instance.

  If this is a user-defined instance type, it will first check for a custom
  Serialize() function and use that if it exists. Otherwise, it will invoke
  serialize all of its public attributes. Lists and dicts are serialized
  trivially.

  Args:
    obj: The instance to serialize.

  Returns:
    The serialized object.
  """
  if is_user_defined_new_style_class(obj):
    if obj and hasattr(obj, CUSTOM_SERIALIZE_METHOD_NAME):
      method = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME)
      if callable(method):
        return method()
    return _serialize_attributes(obj)
  elif is_dict(obj):
    return _serialize_dict(obj)
  elif is_iterable(obj):
    return [serialize(v) for v in obj]

  return obj


class StringEnum(object):
  """Enum like class that is configured with a list of values.

  This class effectively implements an enum for Elements, except for that
  the actual values of the enums will be the string values.
  """

  def __init__(self, *values):
    for name in values:
      setattr(self, name, name)
[ [ 8, 0, 0.1069, 0.0063, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1195, 0.0063, 0, 0.66, 0.0714, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 14, 0, 0.1321, 0.0063, 0, 0.6...
[ "\"\"\"Utility library containing various helpers used by the API.\"\"\"", "import re", "CUSTOM_SERIALIZE_METHOD_NAME = 'serialize'", "MARKUP_RE = re.compile(r'<([^>]*?)>')", "def force_unicode(object):\n \"\"\" Return the Unicode string version of object, with UTF-8 encoding. \"\"\"\n if isinstance(objec...
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Declares the api package."""
[ [ 8, 0, 1, 0.0588, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ] ]
[ "\"\"\"Declares the api package.\"\"\"" ]
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the blip module."""

import unittest

import blip
import element
import ops
import simplejson

# Canonical blip payload used as the base for every test blip.
TEST_BLIP_DATA = {
    'childBlipIds': [],
    'content': '\nhello world!\nanother line',
    'contributors': ['robot@test.com', 'user@test.com'],
    'creator': 'user@test.com',
    'lastModifiedTime': 1000,
    'parentBlipId': None,
    'annotations': [{'range': {'start': 2, 'end': 3},
                     'name': 'key',
                     'value': 'val'}],
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    'elements': {'14': {'type': 'GADGET',
                        'properties': {'url': 'http://a/b.xml'}}},
}

CHILD_BLIP_ID = 'b+42'
ROOT_BLIP_ID = 'b+43'


class TestBlip(unittest.TestCase):
  """Tests the primary data structures for the wave model."""

  def assertBlipStartswith(self, expected, totest):
    # Compare only the leading portion of the blip text.
    actual = totest.text[:len(expected)]
    self.assertEqual(expected, actual)

  def new_blip(self, **args):
    """Create a blip for testing."""
    data = TEST_BLIP_DATA.copy()
    data.update(args)
    res = blip.Blip(data, self.all_blips, self.operation_queue)
    self.all_blips[res.blip_id] = res
    return res

  def setUp(self):
    self.all_blips = {}
    self.operation_queue = ops.OperationQueue()

  def testBlipProperties(self):
    # Exercise every read-only property exposed by a blip.
    root = self.new_blip(blipId=ROOT_BLIP_ID, childBlipIds=[CHILD_BLIP_ID])
    child = self.new_blip(blipId=CHILD_BLIP_ID, parentBlipId=ROOT_BLIP_ID)
    self.assertEqual(ROOT_BLIP_ID, root.blip_id)
    self.assertEqual(set([CHILD_BLIP_ID]), root.child_blip_ids)
    self.assertEqual(set(TEST_BLIP_DATA['contributors']), root.contributors)
    self.assertEqual(TEST_BLIP_DATA['creator'], root.creator)
    self.assertEqual(TEST_BLIP_DATA['content'], root.text)
    self.assertEqual(TEST_BLIP_DATA['lastModifiedTime'],
                     root.last_modified_time)
    self.assertEqual(TEST_BLIP_DATA['parentBlipId'], root.parent_blip_id)
    self.assertEqual(TEST_BLIP_DATA['waveId'], root.wave_id)
    self.assertEqual(TEST_BLIP_DATA['waveletId'], root.wavelet_id)
    self.assertEqual(TEST_BLIP_DATA['content'][3], root[3])
    self.assertEqual(element.Gadget.class_type, root[14].type)
    self.assertEqual('http://a/b.xml', root[14].url)
    self.assertEqual('a', root.text[14])
    self.assertEqual(len(TEST_BLIP_DATA['content']), len(root))
    self.assertTrue(root.is_root())
    self.assertFalse(child.is_root())
    self.assertEqual(root, child.parent_blip)

  def testBlipSerialize(self):
    # A blip should round-trip through serialize().
    root = self.new_blip(blipId=ROOT_BLIP_ID, childBlipIds=[CHILD_BLIP_ID])
    serialized = root.serialize()
    unserialized = blip.Blip(serialized, self.all_blips, self.operation_queue)
    self.assertEqual(root.blip_id, unserialized.blip_id)
    self.assertEqual(root.child_blip_ids, unserialized.child_blip_ids)
    self.assertEqual(root.contributors, unserialized.contributors)
    self.assertEqual(root.creator, unserialized.creator)
    self.assertEqual(root.text, unserialized.text)
    self.assertEqual(root.last_modified_time, unserialized.last_modified_time)
    self.assertEqual(root.parent_blip_id, unserialized.parent_blip_id)
    self.assertEqual(root.wave_id, unserialized.wave_id)
    self.assertEqual(root.wavelet_id, unserialized.wavelet_id)
    self.assertTrue(unserialized.is_root())

  def testDocumentOperations(self):
    # find / replace / delete / insert_after / at-insert on the document.
    doc = self.new_blip(blipId=ROOT_BLIP_ID)
    newlines = [x for x in doc.find('\n')]
    self.assertEqual(2, len(newlines))
    doc.first('world').replace('jupiter')
    bits = doc.text.split('\n')
    self.assertEqual(3, len(bits))
    self.assertEqual('hello jupiter!', bits[1])
    doc.range(2, 5).delete()
    self.assertBlipStartswith('\nho jupiter', doc)
    doc.first('ho').insert_after('la')
    self.assertBlipStartswith('\nhola jupiter', doc)
    doc.at(3).insert(' ')
    self.assertBlipStartswith('\nho la jupiter', doc)

  def testElementHandling(self):
    # Elements keep their identity while surrounding text is edited.
    doc = self.new_blip(blipId=ROOT_BLIP_ID)
    url = 'http://www.test.com/image.png'
    org_len = len(doc)
    doc.append(element.Image(url=url))
    elems = [elem for elem in doc.find(element.Image, url=url)]
    self.assertEqual(1, len(elems))
    elem = elems[0]
    self.assertTrue(isinstance(elem, element.Image))
    doc.at(1).insert('twelve chars')
    self.assertTrue(doc.text.startswith('\ntwelve charshello'))
    elem = doc[org_len + 12].value()
    self.assertTrue(isinstance(elem, element.Image))
    doc.first('twelve ').delete()
    self.assertTrue(doc.text.startswith('\nchars'))
    elem = doc[org_len + 12 - len('twelve ')].value()
    self.assertTrue(isinstance(elem, element.Image))
    doc.first('chars').replace(element.Image(url=url))
    elems = [elem for elem in doc.find(element.Image, url=url)]
    self.assertEqual(2, len(elems))
    self.assertTrue(doc.text.startswith('\n hello'))
    elem = doc[1].value()
    self.assertTrue(isinstance(elem, element.Image))

  def testAnnotationHandling(self):
    # Annotations merge, clip, split and clear as ranges are annotated.
    key = 'style/fontWeight'

    def get_bold():
      for an in doc.annotations[key]:
        if an.value == 'bold':
          return an
      return None

    json = ('[{"range":{"start":3,"end":6},"name":"%s","value":"bold"}]'
            % key)
    doc = self.new_blip(blipId=ROOT_BLIP_ID,
                        annotations=simplejson.loads(json))
    self.assertEqual(1, len(doc.annotations))
    self.assertNotEqual(None, get_bold().value)
    self.assertTrue(key in doc.annotations)
    # extend the bold annotation by adding:
    doc.range(5, 8).annotate(key, 'bold')
    self.assertEqual(1, len(doc.annotations))
    self.assertEqual(8, get_bold().end)
    # clip by adding a same keyed:
    doc[4:12].annotate(key, 'italic')
    self.assertEqual(2, len(doc.annotations[key]))
    self.assertEqual(4, get_bold().end)
    # now split the italic one:
    doc.range(6, 7).clear_annotation(key)
    self.assertEqual(3, len(doc.annotations[key]))
    # test names and iteration
    self.assertEqual(1, len(doc.annotations.names()))
    self.assertEqual(3, len([x for x in doc.annotations]))
    doc[3: 5].annotate('foo', 'bar')
    self.assertEqual(2, len(doc.annotations.names()))
    self.assertEqual(4, len([x for x in doc.annotations]))
    doc[3: 5].clear_annotation('foo')
    # clear the whole thing
    doc.all().clear_annotation(key)
    # getting to the key should now throw an exception
    self.assertRaises(KeyError, doc.annotations.__getitem__, key)

  def testBlipOperations(self):
    # reply() and insert_inline_blip() register new blips in the registry.
    root = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEqual(1, len(self.all_blips))

    otherblip = root.reply()
    otherblip.append('hello world')
    self.assertEqual('hello world', otherblip.text)
    self.assertEqual(root.blip_id, otherblip.parent_blip_id)
    self.assertEqual(2, len(self.all_blips))

    inline = root.insert_inline_blip(3)
    self.assertEqual(root.blip_id, inline.parent_blip_id)
    self.assertEqual(3, len(self.all_blips))

  def testInsertInlineBlipCantInsertAtTheBeginning(self):
    # Position 0 is invalid for inline blips and must not create one.
    root = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEqual(1, len(self.all_blips))
    self.assertRaises(IndexError, root.insert_inline_blip, 0)
    self.assertEqual(1, len(self.all_blips))

  def testDocumentModify(self):
    # all(...).replace applies to every match, including ones just inserted.
    doc = self.new_blip(blipId=ROOT_BLIP_ID)
    doc.all().replace('a text with text and then some text')
    doc[7].insert('text ')
    doc.all('text').replace('thing')
    self.assertEqual('a thing thing with thing and then some thing', doc.text)

  def testIteration(self):
    # Matches come back in strictly increasing positions.
    doc = self.new_blip(blipId=ROOT_BLIP_ID)
    doc.all().replace('aaa 012 aaa 345 aaa 322')
    count = 0
    prev = -1
    for start, end in doc.all('aaa'):
      count += 1
      self.assertTrue(prev < start)
      prev = start
    self.assertEqual(3, count)

  def testBlipRefValue(self):
    # del / slice-assign / element updates keep the text model consistent.
    doc = self.new_blip(blipId=ROOT_BLIP_ID)
    content = doc.text
    content = content[:4] + content[5:]
    del doc[4]
    self.assertEqual(content, doc.text)
    content = content[:2] + content[3:]
    del doc[2:3]
    self.assertEqual(content, doc.text)
    doc[2:3] = 'bike'
    content = content[:2] + 'bike' + content[3:]
    self.assertEqual(content, doc.text)
    url = 'http://www.test.com/image.png'
    doc.append(element.Image(url=url))
    self.assertEqual(url, doc.first(element.Image).url)
    url2 = 'http://www.test.com/another.png'
    doc[-1].update_element({'url': url2})
    self.assertEqual(url2, doc.first(element.Image).url)
    self.assertTrue(doc[3:5] == doc.text[3:5])
    doc.append('geheim')
    self.assertTrue(doc.first('geheim'))
    self.assertFalse(doc.first(element.Button))
    doc.append(element.Button(name='test1', value='Click'))
    button = doc.first(element.Button)
    button.update_element({'name': 'test2'})
    self.assertEqual('test2', button.name)

  def testReplace(self):
    # Replacing a pattern with no match leaves the text untouched.
    doc = self.new_blip(blipId=ROOT_BLIP_ID)
    doc.all().replace('\nxxxx')
    doc.all('yyy').replace('zzz')
    self.assertEqual('\nxxxx', doc.text)

  def testDeleteRangeThatSpansAcrossAnnotationEndPoint(self):
    json = ('[{"range":{"start":1,"end":3},"name":"style","value":"bold"}]')
    doc = self.new_blip(blipId=ROOT_BLIP_ID,
                        annotations=simplejson.loads(json),
                        content='\nFoo bar.')
    doc.range(2, 4).delete()
    self.assertEqual('\nF bar.', doc.text)
    self.assertEqual(1, doc.annotations['style'][0].start)
    self.assertEqual(2, doc.annotations['style'][0].end)

  def testInsertBeforeAnnotationStartPoint(self):
    json = ('[{"range":{"start":4,"end":9},"name":"style","value":"bold"}]')
    doc = self.new_blip(blipId=ROOT_BLIP_ID,
                        annotations=simplejson.loads(json),
                        content='\nFoo bar.')
    doc.at(4).insert('d and')
    self.assertEqual('\nFood and bar.', doc.text)
    self.assertEqual(9, doc.annotations['style'][0].start)
    self.assertEqual(14, doc.annotations['style'][0].end)

  def testDeleteRangeInsideAnnotation(self):
    json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
    doc = self.new_blip(blipId=ROOT_BLIP_ID,
                        annotations=simplejson.loads(json),
                        content='\nFoo bar.')
    doc.range(2, 4).delete()
    self.assertEqual('\nF bar.', doc.text)
    self.assertEqual(1, doc.annotations['style'][0].start)
    self.assertEqual(3, doc.annotations['style'][0].end)

  def testReplaceInsideAnnotation(self):
    json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
    doc = self.new_blip(blipId=ROOT_BLIP_ID,
                        annotations=simplejson.loads(json),
                        content='\nFoo bar.')
    doc.range(2, 4).replace('ooo')
    self.assertEqual('\nFooo bar.', doc.text)
    self.assertEqual(1, doc.annotations['style'][0].start)
    self.assertEqual(6, doc.annotations['style'][0].end)
    doc.range(2, 5).replace('o')
    self.assertEqual('\nFo bar.', doc.text)
    self.assertEqual(1, doc.annotations['style'][0].start)
    self.assertEqual(4, doc.annotations['style'][0].end)

  def testReplaceSpanAnnotation(self):
    json = ('[{"range":{"start":1,"end":4},"name":"style","value":"bold"}]')
    doc = self.new_blip(blipId=ROOT_BLIP_ID,
                        annotations=simplejson.loads(json),
                        content='\nFoo bar.')
    doc.range(2, 9).replace('')
    self.assertEqual('\nF', doc.text)
    self.assertEqual(1, doc.annotations['style'][0].start)
    self.assertEqual(2, doc.annotations['style'][0].end)

  def testSearchWithNoMatchShouldNotGenerateOperation(self):
    # Editing operations are queued only when the pattern actually matches.
    doc = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEqual(-1, doc.text.find(':('))
    self.assertEqual(0, len(self.operation_queue))
    doc.all(':(').replace(':)')
    self.assertEqual(0, len(self.operation_queue))

  def testBlipsRemoveWithId(self):
    # Removing a child also drops it from its parent's child id set.
    blip_dict = {
        ROOT_BLIP_ID: self.new_blip(blipId=ROOT_BLIP_ID,
                                    childBlipIds=[CHILD_BLIP_ID]),
        CHILD_BLIP_ID: self.new_blip(blipId=CHILD_BLIP_ID,
                                     parentBlipId=ROOT_BLIP_ID)
    }
    blips = blip.Blips(blip_dict)
    blips._remove_with_id(CHILD_BLIP_ID)
    self.assertEqual(1, len(blips))
    self.assertEqual(0, len(blips[ROOT_BLIP_ID].child_blip_ids))

  def testAppendMarkup(self):
    # Markup is flattened to plain text before being appended.
    doc = self.new_blip(blipId=ROOT_BLIP_ID, content='\nFoo bar.')
    markup = '<p><span>markup<span> content</p>'
    doc.append_markup(markup)
    self.assertEqual(1, len(self.operation_queue))
    self.assertEqual('\nFoo bar.\nmarkup content', doc.text)

  def testBundledAnnotations(self):
    # Annotations passed with append apply only to the appended text.
    doc = self.new_blip(blipId=ROOT_BLIP_ID, content='\nFoo bar.')
    doc.append('not bold')
    doc.append('bold', bundled_annotations=[('style/fontWeight', 'bold')])
    self.assertEqual(2, len(doc.annotations))
    self.assertEqual('bold', doc.annotations['style/fontWeight'][0].value)

  def testInlineBlipOffset(self):
    # A child referenced by an INLINE_BLIP element reports its offset.
    offset = 14
    self.new_blip(blipId=ROOT_BLIP_ID,
                  childBlipIds=[CHILD_BLIP_ID],
                  elements={str(offset):
                            {'type': element.Element.INLINE_BLIP_TYPE,
                             'properties': {'id': CHILD_BLIP_ID}}})
    child = self.new_blip(blipId=CHILD_BLIP_ID, parentBlipId=ROOT_BLIP_ID)
    self.assertEqual(offset, child.inline_blip_offset)


if __name__ == '__main__':
  unittest.main()
[ [ 8, 0, 0.0455, 0.0027, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0535, 0.0027, 0, 0.66, 0.1, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0588, 0.0027, 0, 0.66, ...
[ "\"\"\"Unit tests for the blip module.\"\"\"", "import unittest", "import blip", "import element", "import ops", "import simplejson", "TEST_BLIP_DATA = {\n 'childBlipIds': [],\n 'content': '\\nhello world!\\nanother line',\n 'contributors': ['robot@test.com', 'user@test.com'],\n 'creator': '...
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module defines the ModuleTestRunnerClass.""" import unittest class ModuleTestRunner(object): """Responsible for executing all test cases in a list of modules.""" def __init__(self, module_list=None, module_test_settings=None): self.modules = module_list or [] self.settings = module_test_settings or {} def RunAllTests(self): """Executes all tests present in the list of modules.""" runner = unittest.TextTestRunner() for module in self.modules: for setting, value in self.settings.iteritems(): try: setattr(module, setting, value) except AttributeError: print '\nError running ' + str(setting) print '\nRunning all tests in module', module.__name__ runner.run(unittest.defaultTestLoader.loadTestsFromModule(module))
[ [ 8, 0, 0.425, 0.025, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.5, 0.025, 0, 0.66, 0.5, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 3, 0, 0.7875, 0.45, 0, 0.66, 1, ...
[ "\"\"\"Module defines the ModuleTestRunnerClass.\"\"\"", "import unittest", "class ModuleTestRunner(object):\n \"\"\"Responsible for executing all test cases in a list of modules.\"\"\"\n\n def __init__(self, module_list=None, module_test_settings=None):\n self.modules = module_list or []\n self.setting...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Defines event types that are sent from the wave server.

This module defines all of the event types currently supported by the wave
server. Each event type is sub classed from Event and has its own properties
depending on the type.
"""


class Context(object):
  """Specifies constants representing different context requests."""

  #: Requests the root blip.
  ROOT = 'ROOT'
  #: Requests the parent blip of the event blip.
  PARENT = 'PARENT'
  #: Requests the siblings blip of the event blip.
  SIBLINGS = 'SIBLINGS'
  #: Requests the child blips of the event blip.
  CHILDREN = 'CHILDREN'
  #: Requests the event blip itself.
  SELF = 'SELF'
  #: Requests all of the blips of the event wavelet.
  ALL = 'ALL'


class Event(object):
  """Object describing a single event.

  Attributes:
    modified_by: Participant id that caused this event.
    timestamp: Timestamp that this event occurred on the server.
    type: Type string of this event.
    properties: Dictionary of all extra properties. Typically the derrived
        event type should have these explicitly set as attributes, but
        experimental features might appear in properties before that.
    blip_id: The blip_id of the blip for blip related events or the root
        blip for wavelet related events.
    blip: If available, the blip with id equal to the events blip_id.
    proxying_for: If available, the proxyingFor id of the robot that caused
        the event.
  """

  def __init__(self, json, wavelet):
    """Inits this event with JSON data.

    Args:
      json: JSON data from Wave server.
      wavelet: The wavelet the event belongs to; used to look up the blip
          referenced by the event, if any.
    """
    self.modified_by = json.get('modifiedBy')
    self.timestamp = json.get('timestamp', 0)
    self.type = json.get('type')
    # Keep the raw payload around so experimental fields remain reachable.
    self.raw_data = json
    self.properties = json.get('properties', {})
    self.blip_id = self.properties.get('blipId')
    self.blip = wavelet.blips.get(self.blip_id)
    self.proxying_for = json.get('proxyingFor')


class WaveletBlipCreated(Event):
  """Event triggered when a new blip is created.

  Attributes:
    new_blip_id: The id of the newly created blip.
    new_blip: If in context, the actual new blip.
  """
  type = 'WAVELET_BLIP_CREATED'

  def __init__(self, json, wavelet):
    super(WaveletBlipCreated, self).__init__(json, wavelet)
    self.new_blip_id = self.properties['newBlipId']
    self.new_blip = wavelet.blips.get(self.new_blip_id)


class WaveletBlipRemoved(Event):
  """Event triggered when a new blip is removed.

  Attributes:
    removed_blip_id: the id of the removed blip
    removed_blip: if in context, the removed blip
  """
  type = 'WAVELET_BLIP_REMOVED'

  def __init__(self, json, wavelet):
    super(WaveletBlipRemoved, self).__init__(json, wavelet)
    self.removed_blip_id = self.properties['removedBlipId']
    self.removed_blip = wavelet.blips.get(self.removed_blip_id)


class WaveletParticipantsChanged(Event):
  """Event triggered when the participants on a wave change.

  Attributes:
    participants_added: List of participants added.
    participants_removed: List of participants removed.
  """
  type = 'WAVELET_PARTICIPANTS_CHANGED'

  def __init__(self, json, wavelet):
    super(WaveletParticipantsChanged, self).__init__(json, wavelet)
    self.participants_added = self.properties['participantsAdded']
    self.participants_removed = self.properties['participantsRemoved']


class WaveletSelfAdded(Event):
  """Event triggered when the robot is added to the wavelet."""
  type = 'WAVELET_SELF_ADDED'


class WaveletSelfRemoved(Event):
  """Event triggered when the robot is removed from the wavelet."""
  type = 'WAVELET_SELF_REMOVED'


class WaveletTitleChanged(Event):
  """Event triggered when the title of the wavelet has changed.

  Attributes:
    title: The new title.
  """
  type = 'WAVELET_TITLE_CHANGED'

  def __init__(self, json, wavelet):
    super(WaveletTitleChanged, self).__init__(json, wavelet)
    self.title = self.properties['title']


class BlipContributorsChanged(Event):
  """Event triggered when the contributors to this blip change.

  Attributes:
    contributors_added: List of contributors that were added.
    contributors_removed: List of contributors that were removed.
  """
  type = 'BLIP_CONTRIBUTORS_CHANGED'

  def __init__(self, json, wavelet):
    super(BlipContributorsChanged, self).__init__(json, wavelet)
    # Fixed: these attributes were previously only set under the misspelled
    # names 'contibutors_*', contradicting the documented attribute names.
    self.contributors_added = self.properties['contributorsAdded']
    self.contributors_removed = self.properties['contributorsRemoved']
    # Keep the old misspelled names as aliases for backward compatibility.
    self.contibutors_added = self.contributors_added
    self.contibutors_removed = self.contributors_removed


class BlipSubmitted(Event):
  """Event triggered when a blip is submitted."""
  type = 'BLIP_SUBMITTED'


class DocumentChanged(Event):
  """Event triggered when a document is changed.

  This event is fired after any changes in the document and should be used
  carefully to keep the amount of traffic to the robot reasonable. Use
  filters where appropriate.
  """
  type = 'DOCUMENT_CHANGED'


class FormButtonClicked(Event):
  """Event triggered when a form button is clicked.

  Attributes:
    button_name: The name of the button that was clicked.
  """
  type = 'FORM_BUTTON_CLICKED'

  def __init__(self, json, wavelet):
    super(FormButtonClicked, self).__init__(json, wavelet)
    self.button_name = self.properties['buttonName']


class GadgetStateChanged(Event):
  """Event triggered when the state of a gadget changes.

  Attributes:
    index: The index of the gadget that changed in the document.
    old_state: The old state of the gadget.
  """
  type = 'GADGET_STATE_CHANGED'

  def __init__(self, json, wavelet):
    super(GadgetStateChanged, self).__init__(json, wavelet)
    self.index = self.properties['index']
    self.old_state = self.properties['oldState']


class AnnotatedTextChanged(Event):
  """Event triggered when text with an annotation has changed.

  This is mainly useful in combination with a filter on the name of the
  annotation.

  Attributes:
    name: The name of the annotation.
    value: The value of the annotation that changed.
  """
  type = 'ANNOTATED_TEXT_CHANGED'

  def __init__(self, json, wavelet):
    super(AnnotatedTextChanged, self).__init__(json, wavelet)
    self.name = self.properties['name']
    # 'value' may be absent, so use .get() rather than indexing.
    self.value = self.properties.get('value')


class OperationError(Event):
  """Triggered when an event on the server occurred.

  Attributes:
    operation_id: The operation id of the failing operation.
    error_message: More information as to what went wrong.
  """
  type = 'OPERATION_ERROR'

  def __init__(self, json, wavelet):
    super(OperationError, self).__init__(json, wavelet)
    self.operation_id = self.properties['operationId']
    self.error_message = self.properties['message']


class WaveletCreated(Event):
  """Triggered when a new wavelet is created.

  This event is only triggered if the robot creates a new wavelet and can
  be used to initialize the newly created wave. wavelets created by other
  participants remain invisible to the robot until the robot is added to
  the wave in which case WaveletSelfAdded is triggered.

  Attributes:
    message: Whatever string was passed into the new_wave call as message
        (if any).
  """
  type = 'WAVELET_CREATED'

  def __init__(self, json, wavelet):
    super(WaveletCreated, self).__init__(json, wavelet)
    self.message = self.properties['message']


class WaveletFetched(Event):
  """Triggered when a new wavelet is fetched.

  This event is triggered after a robot requests to see another wavelet.
  The robot has to be on the other wavelet already.

  Attributes:
    message: Whatever string was passed into the new_wave call as message
        (if any).
  """
  type = 'WAVELET_FETCHED'

  def __init__(self, json, wavelet):
    super(WaveletFetched, self).__init__(json, wavelet)
    self.message = self.properties['message']


class WaveletTagsChanged(Event):
  """Event triggered when the tags on a wavelet change."""
  type = 'WAVELET_TAGS_CHANGED'

  def __init__(self, json, wavelet):
    super(WaveletTagsChanged, self).__init__(json, wavelet)


def is_event(cls):
  """Returns whether the passed class is an event."""
  try:
    if not issubclass(cls, Event):
      return False
    # Concrete events declare a class-level 'type' string.
    return hasattr(cls, 'type')
  except TypeError:
    # issubclass raises TypeError for non-class arguments.
    return False

# All event classes defined in this module, discovered by introspection.
ALL = [item for item in globals().copy().values() if is_event(item)]
[ [ 8, 0, 0.0648, 0.0199, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.1063, 0.0498, 0, 0.66, 0.05, 560, 0, 0, 0, 0, 186, 0, 0 ], [ 8, 1, 0.0864, 0.0033, 1, 0.64, ...
[ "\"\"\"Defines event types that are sent from the wave server.\n\nThis module defines all of the event types currently supported by the wave\nserver. Each event type is sub classed from Event and has its own\nproperties depending on the type.\n\"\"\"", "class Context(object):\n \"\"\"Specifies constants represen...
#!/usr/bin/python2.4 # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import element import errors import util class Annotation(object): """Models an annotation on a document. Annotations are key/value pairs over a range of content. Annotations can be used to store data or to be interpreted by a client when displaying the data. """ # Use the following constants to control the display of the client #: Reserved annotation for setting background color of text. BACKGROUND_COLOR = "style/backgroundColor" #: Reserved annotation for setting color of text. COLOR = "style/color" #: Reserved annotation for setting font family of text. FONT_FAMILY = "style/fontFamily" #: Reserved annotation for setting font family of text. FONT_SIZE = "style/fontSize" #: Reserved annotation for setting font style of text. FONT_STYLE = "style/fontStyle" #: Reserved annotation for setting font weight of text. FONT_WEIGHT = "style/fontWeight" #: Reserved annotation for setting text decoration. TEXT_DECORATION = "style/textDecoration" #: Reserved annotation for setting vertical alignment. 
VERTICAL_ALIGN = "style/verticalAlign" def __init__(self, name, value, start, end): self._name = name self._value = value self._start = start self._end = end @property def name(self): return self._name @property def value(self): return self._value @property def start(self): return self._start @property def end(self): return self._end def _shift(self, where, inc): """Shift annotation by 'inc' if it (partly) overlaps with 'where'.""" if self._start >= where: self._start += inc if self._end >= where: self._end += inc def serialize(self): """Serializes the annotation. Returns: A dict containing the name, value, and range values. """ return {'name': self._name, 'value': self._value, 'range': {'start': self._start, 'end': self._end}} class Annotations(object): """A dictionary-like object containing the annotations, keyed by name.""" def __init__(self, operation_queue, blip): self._operation_queue = operation_queue self._blip = blip self._store = {} def __contains__(self, what): if isinstance(what, Annotation): what = what.name return what in self._store def _add_internal(self, name, value, start, end): """Internal add annotation does not send out operations.""" if name in self._store: # TODO: use bisect to make this more efficient. 
new_list = [] for existing in self._store[name]: if start > existing.end or end < existing.start: new_list.append(existing) else: if existing.value == value: # merge the annotations: start = min(existing.start, start) end = max(existing.end, end) else: # chop the bits off the existing annotation if existing.start < start: new_list.append(Annotation( existing.name, existing.value, existing.start, start)) if existing.end > end: new_list.append(Annotation( existing.name, existing.value, existing.end, end)) new_list.append(Annotation(name, value, start, end)) self._store[name] = new_list else: self._store[name] = [Annotation(name, value, start, end)] def _delete_internal(self, name, start=0, end=-1): """Remove the passed annotaion from the internal representation.""" if not name in self._store: return if end < 0: end = len(self._blip) + end new_list = [] for a in self._store[name]: if start > a.end or end < a.start: new_list.append(a) elif start < a.start and end > a.end: continue else: if a.start < start: new_list.append(Annotation(name, a.value, a.start, start)) if a.end > end: new_list.append(Annotation(name, a.value, end, a.end)) if new_list: self._store[name] = new_list else: del self._store[name] def _shift(self, where, inc): """Shift annotation by 'inc' if it (partly) overlaps with 'where'.""" for annotations in self._store.values(): for annotation in annotations: annotation._shift(where, inc) # Merge fragmented annotations that should be contiguous, for example: # Annotation('foo', 'bar', 1, 2) and Annotation('foo', 'bar', 2, 3). for name, annotations in self._store.items(): new_list = [] for i, annotation in enumerate(annotations): name = annotation.name value = annotation.value start = annotation.start end = annotation.end # Find the last end index. for j, next_annotation in enumerate(annotations[i + 1:]): # Not contiguous, skip. if (end < next_annotation.start): break # Contiguous, merge. 
if (end == next_annotation.start and value == next_annotation.value): end = next_annotation.end del annotations[j] new_list.append(Annotation(name, value, start, end)) self._store[name] = new_list def __len__(self): return len(self._store) def __getitem__(self, key): return self._store[key] def __iter__(self): for l in self._store.values(): for ann in l: yield ann def names(self): """Return the names of the annotations in the store.""" return self._store.keys() def serialize(self): """Return a list of the serialized annotations.""" res = [] for v in self._store.values(): res += [a.serialize() for a in v] return res class Blips(object): """A dictionary-like object containing the blips, keyed on blip ID.""" def __init__(self, blips): self._blips = blips def __getitem__(self, blip_id): return self._blips[blip_id] def __iter__(self): return self._blips.__iter__() def __len__(self): return len(self._blips) def _add(self, ablip): self._blips[ablip.blip_id] = ablip def _remove_with_id(self, blip_id): del_blip = self._blips[blip_id] if del_blip: # Remove the reference to this blip from its parent. parent_blip = self._blips[blip_id].parent_blip if parent_blip: parent_blip._child_blip_ids.remove(blip_id) del self._blips[blip_id] def get(self, blip_id, default_value=None): """Retrieves a blip. Returns: A Blip object. If none found for the ID, it returns None, or if default_value is specified, it returns that. """ return self._blips.get(blip_id, default_value) def serialize(self): """Serializes the blips. Returns: A dict of serialized blips. """ res = {} for blip_id, item in self._blips.items(): res[blip_id] = item.serialize() return res class BlipRefs(object): """Represents a set of references to contents in a blip. For example, a BlipRefs instance can represent the results of a search, an explicitly set range, a regular expression, or refer to the entire blip. BlipRefs are used to express operations on a blip in a consistent way that can easily be transfered to the server. 
The typical way of creating a BlipRefs object is to use selector methods on the Blip object. Developers will not usually instantiate a BlipRefs object directly. """ DELETE = 'DELETE' REPLACE = 'REPLACE' INSERT = 'INSERT' INSERT_AFTER = 'INSERT_AFTER' ANNOTATE = 'ANNOTATE' CLEAR_ANNOTATION = 'CLEAR_ANNOTATION' UPDATE_ELEMENT = 'UPDATE_ELEMENT' def __init__(self, blip, maxres=1): self._blip = blip self._maxres = maxres @classmethod def all(cls, blip, findwhat, maxres=-1, **restrictions): """Construct an instance representing the search for text or elements.""" obj = cls(blip, maxres) obj._findwhat = findwhat obj._restrictions = restrictions obj._hits = lambda: obj._find(findwhat, maxres, **restrictions) if findwhat is None: # No findWhat, take the entire blip obj._params = {} else: query = {'maxRes': maxres} if isinstance(findwhat, basestring): query['textMatch'] = findwhat else: query['elementMatch'] = findwhat.class_type query['restrictions'] = restrictions obj._params = {'modifyQuery': query} return obj @classmethod def range(cls, blip, begin, end): """Constructs an instance representing an explicitly set range.""" obj = cls(blip) obj._begin = begin obj._end = end obj._hits = lambda: [(begin, end)] obj._params = {'range': {'start': begin, 'end': end}} return obj def _elem_matches(self, elem, clz, **restrictions): if not isinstance(elem, clz): return False for key, val in restrictions.items(): if getattr(elem, key) != val: return False return True def _find(self, what, maxres=-1, **restrictions): """Iterates where 'what' occurs in the associated blip. What can be either a string or a class reference. Examples: self._find('hello') will return the first occurence of the word hello self._find(element.Gadget, url='http://example.com/gadget.xml') will return the first gadget that has as url example.com. Args: what: what to search for. Can be a class or a string. The class should be an element from element.py maxres: number of results to return at most, or <= 0 for all. 
restrictions: if what specifies a class, further restrictions of the found instances. Yields: Tuples indicating the range of the matches. For a one character/element match at position x, (x, x+1) is yielded. """ blip = self._blip if what is None: yield 0, len(blip) raise StopIteration if isinstance(what, basestring): idx = blip._content.find(what) count = 0 while idx != -1: yield idx, idx + len(what) count += 1 if count == maxres: raise StopIteration idx = blip._content.find(what, idx + len(what)) else: count = 0 for idx, el in blip._elements.items(): if self._elem_matches(el, what, **restrictions): yield idx, idx + 1 count += 1 if count == maxres: raise StopIteration def _execute(self, modify_how, what, bundled_annotations=None): """Executes this BlipRefs object. Args: modify_how: What to do. Any of the operation declared at the top. what: Depending on the operation. For delete, has to be None. For the others it is a singleton, a list or a function returning what to do; for ANNOTATE tuples of (key, value), for the others either string or elements. If what is a function, it takes three parameters, the content of the blip, the beginning of the matching range and the end. bundled_annotations: Annotations to apply immediately. Raises: IndexError when trying to access content outside of the blip. ValueError when called with the wrong values. Returns: self for chainability. """ blip = self._blip if modify_how != BlipRefs.DELETE: if type(what) != list: what = [what] next_index = 0 matched = [] # updated_elements is used to store the element type of the # element to update updated_elements = [] # For now, if we find one markup, we'll use it everywhere. 
next = None hit_found = False for start, end in self._hits(): hit_found = True if start < 0: start += len(blip) if end == 0: end += len(blip) if end < 0: end += len(blip) if len(blip) == 0: if start != 0 or end != 0: raise IndexError('Start and end have to be 0 for empty document') elif start < 0 or end < 1 or start >= len(blip) or end > len(blip): raise IndexError('Position outside the document') if modify_how == BlipRefs.DELETE: for i in range(start, end): if i in blip._elements: del blip._elements[i] blip._delete_annotations(start, end) blip._shift(end, start - end) blip._content = blip._content[:start] + blip._content[end:] else: if callable(what): next = what(blip._content, start, end) matched.append(next) else: next = what[next_index] next_index = (next_index + 1) % len(what) if isinstance(next, str): next = util.force_unicode(next) if modify_how == BlipRefs.ANNOTATE: key, value = next blip.annotations._add_internal(key, value, start, end) elif modify_how == BlipRefs.CLEAR_ANNOTATION: blip.annotations._delete_internal(next, start, end) elif modify_how == BlipRefs.UPDATE_ELEMENT: el = blip._elements.get(start) if not element: raise ValueError('No element found at index %s' % start) # the passing around of types this way feels a bit dirty: updated_elements.append(element.Element.from_json({'type': el.type, 'properties': next})) for k, b in next.items(): setattr(el, k, b) else: if modify_how == BlipRefs.INSERT: end = start elif modify_how == BlipRefs.INSERT_AFTER: start = end elif modify_how == BlipRefs.REPLACE: pass else: raise ValueError('Unexpected modify_how: ' + modify_how) if isinstance(next, element.Element): text = ' ' else: text = next # in the case of a replace, and the replacement text is shorter, # delete the delta. 
if start != end and len(text) < end - start: blip._delete_annotations(start + len(text), end) blip._shift(end, len(text) + start - end) blip._content = blip._content[:start] + text + blip._content[end:] if bundled_annotations: end_annotation = start + len(text) blip._delete_annotations(start, end_annotation) for key, value in bundled_annotations: blip.annotations._add_internal(key, value, start, end_annotation) if isinstance(next, element.Element): blip._elements[start] = next # No match found, return immediately without generating op. if not hit_found: return operation = blip._operation_queue.document_modify(blip.wave_id, blip.wavelet_id, blip.blip_id) for param, value in self._params.items(): operation.set_param(param, value) modify_action = {'modifyHow': modify_how} if modify_how == BlipRefs.DELETE: pass elif modify_how == BlipRefs.UPDATE_ELEMENT: modify_action['elements'] = updated_elements elif (modify_how == BlipRefs.REPLACE or modify_how == BlipRefs.INSERT or modify_how == BlipRefs.INSERT_AFTER): if callable(what): what = matched if what: if not isinstance(next, element.Element): modify_action['values'] = [util.force_unicode(value) for value in what] else: modify_action['elements'] = what elif modify_how == BlipRefs.ANNOTATE: modify_action['values'] = [x[1] for x in what] modify_action['annotationKey'] = what[0][0] elif modify_how == BlipRefs.CLEAR_ANNOTATION: modify_action['annotationKey'] = what[0] if bundled_annotations: modify_action['bundledAnnotations'] = [ {'key': key, 'value': value} for key, value in bundled_annotations] operation.set_param('modifyAction', modify_action) return self def insert(self, what, bundled_annotations=None): """Inserts what at the matched positions.""" return self._execute( BlipRefs.INSERT, what, bundled_annotations=bundled_annotations) def insert_after(self, what, bundled_annotations=None): """Inserts what just after the matched positions.""" return self._execute( BlipRefs.INSERT_AFTER, what, 
bundled_annotations=bundled_annotations) def replace(self, what, bundled_annotations=None): """Replaces the matched positions with what.""" return self._execute( BlipRefs.REPLACE, what, bundled_annotations=bundled_annotations) def delete(self): """Deletes the content at the matched positions.""" return self._execute(BlipRefs.DELETE, None) def annotate(self, name, value=None): """Annotates the content at the matched positions. You can either specify both name and value to set the same annotation, or supply as the first parameter something that yields name/value pairs. The name and value should both be strings. """ if value is None: what = name else: what = (name, value) return self._execute(BlipRefs.ANNOTATE, what) def clear_annotation(self, name): """Clears the annotation at the matched positions.""" return self._execute(BlipRefs.CLEAR_ANNOTATION, name) def update_element(self, new_values): """Update an existing element with a set of new values.""" return self._execute(BlipRefs.UPDATE_ELEMENT, new_values) def __nonzero__(self): """Return whether we have a value.""" for start, end in self._hits(): return True return False def value(self): """Convenience method to convert a BlipRefs to value of its first match.""" for start, end in self._hits(): if end - start == 1 and start in self._blip._elements: return self._blip._elements[start] else: return self._blip.text[start:end] raise ValueError('BlipRefs has no values') def __getattr__(self, attribute): """Mirror the getattr of value(). This allows for clever things like first(IMAGE).url or blip.annotate_with(key, value).upper() """ return getattr(self.value(), attribute) def __radd__(self, other): """Make it possible to add this to a string.""" return other + self.value() def __cmp__(self, other): """Support comparision with target.""" return cmp(self.value(), other) def __iter__(self): for start_end in self._hits(): yield start_end class Blip(object): """Models a single blip instance. 
Blips are essentially the documents that make up a conversation. Blips can live in a hierarchy of blips. A root blip has no parent blip id, but all blips have the ids of the wave and wavelet that they are associated with. Blips also contain annotations, content and elements, which are accessed via the Document object. """ def __init__(self, json, other_blips, operation_queue): """Inits this blip with JSON data. Args: json: JSON data dictionary from Wave server. other_blips: A dictionary like object that can be used to resolve ids of blips to blips. operation_queue: an OperationQueue object to store generated operations in. """ self._blip_id = json.get('blipId') self._operation_queue = operation_queue self._child_blip_ids = set(json.get('childBlipIds', [])) self._content = json.get('content', '') self._contributors = set(json.get('contributors', [])) self._creator = json.get('creator') self._last_modified_time = json.get('lastModifiedTime', 0) self._version = json.get('version', 0) self._parent_blip_id = json.get('parentBlipId') self._wave_id = json.get('waveId') self._wavelet_id = json.get('waveletId') if isinstance(other_blips, Blips): self._other_blips = other_blips else: self._other_blips = Blips(other_blips) self._annotations = Annotations(operation_queue, self) for annjson in json.get('annotations', []): r = annjson['range'] self._annotations._add_internal(annjson['name'], annjson['value'], r['start'], r['end']) self._elements = {} json_elements = json.get('elements', {}) for elem in json_elements: self._elements[int(elem)] = element.Element.from_json(json_elements[elem]) self.raw_data = json @property def blip_id(self): """The id of this blip.""" return self._blip_id @property def wave_id(self): """The id of the wave that this blip belongs to.""" return self._wave_id @property def wavelet_id(self): """The id of the wavelet that this blip belongs to.""" return self._wavelet_id @property def child_blip_ids(self): """The set of the ids of this blip's 
children.""" return self._child_blip_ids @property def child_blips(self): """The set of blips that are children of this blip.""" return set([self._other_blips[blid_id] for blid_id in self._child_blip_ids if blid_id in self._other_blips]) @property def contributors(self): """The set of participant ids that contributed to this blip.""" return self._contributors @property def creator(self): """The id of the participant that created this blip.""" return self._creator @property def last_modified_time(self): """The time in seconds since epoch when this blip was last modified.""" return self._last_modified_time @property def version(self): """The version of this blip.""" return self._version @property def parent_blip_id(self): """The parent blip_id or None if this is the root blip.""" return self._parent_blip_id @property def parent_blip(self): """The parent blip or None if it is the root.""" # if parent_blip_id is None, get will also return None return self._other_blips.get(self._parent_blip_id) @property def inline_blip_offset(self): """The offset in the parent if this blip is inline or -1 if not. If the parent is not in the context, this function will always return -1 since it can't determine the inline blip status. """ parent = self.parent_blip if not parent: return -1 for offset, el in parent._elements.items(): if el.type == element.Element.INLINE_BLIP_TYPE and el.id == self.blip_id: return offset return -1 def is_root(self): """Returns whether this is the root blip of a wavelet.""" return self._parent_blip_id is None @property def annotations(self): """The annotations for this document.""" return self._annotations @property def elements(self): """Returns a list of elements for this document. The elements of a blip are things like forms elements and gadgets that cannot be expressed as plain text. In the text of the blip, you'll typically find a space as a place holder for the element. 
If you want to retrieve the element at a particular index in the blip, use blip[index].value(). """ return self._elements.values() def __len__(self): return len(self._content) def __getitem__(self, item): """returns a BlipRefs for the given slice.""" if isinstance(item, slice): if item.step: raise errors.Error('Step not supported for blip slices') return self.range(item.start, item.stop) else: return self.at(item) def __setitem__(self, item, value): """short cut for self.range/at().replace(value).""" self.__getitem__(item).replace(value) def __delitem__(self, item): """short cut for self.range/at().delete().""" self.__getitem__(item).delete() def _shift(self, where, inc): """Move element and annotations after 'where' up by 'inc'.""" new_elements = {} for idx, el in self._elements.items(): if idx >= where: idx += inc new_elements[idx] = el self._elements = new_elements self._annotations._shift(where, inc) def _delete_annotations(self, start, end): """Delete all annotations between 'start' and 'end'.""" for annotation_name in self._annotations.names(): self._annotations._delete_internal(annotation_name, start, end) def all(self, findwhat=None, maxres=-1, **restrictions): """Returns a BlipRefs object representing all results for the search. If searching for an element, the restrictions can be used to specify additional element properties to filter on, like the url of a Gadget. """ return BlipRefs.all(self, findwhat, maxres, **restrictions) def first(self, findwhat=None, **restrictions): """Returns a BlipRefs object representing the first result for the search. If searching for an element, the restrictions can be used to specify additional element properties to filter on, like the url of a Gadget. 
""" return BlipRefs.all(self, findwhat, 1, **restrictions) def at(self, index): """Returns a BlipRefs object representing a 1-character range.""" return BlipRefs.range(self, index, index + 1) def range(self, start, end): """Returns a BlipRefs object representing the range.""" return BlipRefs.range(self, start, end) def serialize(self): """Return a dictionary representation of this blip ready for json.""" return {'blipId': self._blip_id, 'childBlipIds': list(self._child_blip_ids), 'content': self._content, 'creator': self._creator, 'contributors': list(self._contributors), 'lastModifiedTime': self._last_modified_time, 'version': self._version, 'parentBlipId': self._parent_blip_id, 'waveId': self._wave_id, 'waveletId': self._wavelet_id, 'annotations': self._annotations.serialize(), 'elements': dict([(index, e.serialize()) for index, e in self._elements.items()]) } def proxy_for(self, proxy_for_id): """Return a view on this blip that will proxy for the specified id. A shallow copy of the current blip is returned with the proxy_for_id set. Any modifications made to this copy will be done using the proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will be used. 
""" operation_queue = self._operation_queue.proxy_for(proxy_for_id) res = Blip(json={}, other_blips={}, operation_queue=operation_queue) res._blip_id = self._blip_id res._child_blip_ids = self._child_blip_ids res._content = self._content res._contributors = self._contributors res._creator = self._creator res._last_modified_time = self._last_modified_time res._version = self._version res._parent_blip_id = self._parent_blip_id res._wave_id = self._wave_id res._wavelet_id = self._wavelet_id res._other_blips = self._other_blips res._annotations = self._annotations res._elements = self._elements res.raw_data = self.raw_data return res @property def text(self): """Returns the raw text content of this document.""" return self._content def find(self, what, **restrictions): """Iterate to matching bits of contents. Yield either elements or pieces of text. """ br = BlipRefs.all(self, what, **restrictions) for start, end in br._hits(): if end - start == 1 and start in self._elements: yield self._elements[start] else: yield self._content[start:end] raise StopIteration def append(self, what, bundled_annotations=None): """Convenience method covering a common pattern.""" return BlipRefs.all(self, findwhat=None).insert_after( what, bundled_annotations=bundled_annotations) def reply(self): """Create and return a reply to this blip.""" blip_data = self._operation_queue.blip_create_child(self.wave_id, self.wavelet_id, self.blip_id) new_blip = Blip(blip_data, self._other_blips, self._operation_queue) self._other_blips._add(new_blip) return new_blip def append_markup(self, markup): """Interpret the markup text as xhtml and append the result to the doc. Args: markup: The markup'ed text to append. """ markup = util.force_unicode(markup) self._operation_queue.document_append_markup(self.wave_id, self.wavelet_id, self.blip_id, markup) self._content += util.parse_markup(markup) def insert_inline_blip(self, position): """Inserts an inline blip into this blip at a specific position. 
Args: position: Position to insert the blip at. This has to be greater than 0. Returns: The JSON data of the blip that was created. """ if position <= 0: raise IndexError(('Illegal inline blip position: %d. Position has to ' + 'be greater than 0.') % position) blip_data = self._operation_queue.document_inline_blip_insert( self.wave_id, self.wavelet_id, self.blip_id, position) new_blip = Blip(blip_data, self._other_blips, self._operation_queue) self._other_blips._add(new_blip) return new_blip
[ [ 1, 0, 0.0191, 0.0011, 0, 0.66, 0, 736, 0, 1, 0, 0, 736, 0, 0 ], [ 1, 0, 0.0202, 0.0011, 0, 0.66, 0.1429, 841, 0, 1, 0, 0, 841, 0, 0 ], [ 1, 0, 0.0225, 0.0011, 0, ...
[ "import element", "import errors", "import util", "class Annotation(object):\n \"\"\"Models an annotation on a document.\n\n Annotations are key/value pairs over a range of content. Annotations\n can be used to store data or to be interpreted by a client when displaying\n the data.\n \"\"\"", " \"\"\"...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Elements are non-text bits living in blips like images, gadgets etc.

This module defines the Element class and the derived classes.
"""

import base64
import logging
import sys

import util


class Element(object):
  """Elements are non-text content within a document.

  These are generally abstracted from the Robot. Although a Robot can query
  the properties of an element it can only interact with the specific types
  that the element represents.

  Properties of elements are both accessible directly (image.url) and through
  the properties dictionary (image.properties['url']). In general Element
  should not be instantiated by robots, but rather rely on the derived
  classes.
  """

  # INLINE_BLIP_TYPE is not a separate type since it shouldn't be
  # instantiated, only be used for introspection.
  INLINE_BLIP_TYPE = "INLINE_BLIP"

  def __init__(self, element_type, **properties):
    """Initializes self with the specified type and any properties.

    Args:
      element_type: string typed member of ELEMENT_TYPE
      properties: either a dictionary of initial properties, or a dictionary
          with just one member properties that is itself a dictionary of
          properties. This allows us to both use
          e = Element(atype, prop1=val1, prop2=prop2...) and
          e = Element(atype, properties={prop1:val1, prop2:prop2..})
    """
    if len(properties) == 1 and 'properties' in properties:
      properties = properties['properties']
    self._type = element_type
    # As long as the operation_queue of an element is None, it is
    # unattached. After an element is acquired by a blip, the blip
    # will set the operation_queue to make sure all changes to the
    # element are properly sent to the server.
    self._operation_queue = None
    self._properties = properties.copy()

  @property
  def type(self):
    """The type of this element."""
    return self._type

  @classmethod
  def from_json(cls, json):
    """Class method to instantiate an Element based on a json string."""
    etype = json['type']
    props = json['properties'].copy()
    element_class = ALL.get(etype)
    if not element_class:
      # Unknown type. Server could be newer than we are.
      return Element(element_type=etype, properties=props)
    return element_class.from_props(props)

  def get(self, key, default=None):
    """Standard get interface."""
    return self._properties.get(key, default)

  def __getattr__(self, key):
    # Expose properties as plain attributes (image.url etc.).
    return self._properties[key]

  def serialize(self):
    """Custom serializer for Elements."""
    return util.serialize({'properties': util.non_none_dict(self._properties),
                           'type': self._type})


class Input(Element):
  """A single-line input element."""
  class_type = 'INPUT'

  def __init__(self, name, value=''):
    super(Input, self).__init__(Input.class_type,
                                name=name,
                                value=value,
                                default_value=value)

  @classmethod
  def from_props(cls, props):
    return Input(name=props.get('name'), value=props.get('value'))


class Check(Element):
  """A checkbox element."""
  class_type = 'CHECK'

  def __init__(self, name, value=''):
    super(Check, self).__init__(Check.class_type,
                                name=name,
                                value=value,
                                default_value=value)

  @classmethod
  def from_props(cls, props):
    return Check(name=props.get('name'), value=props.get('value'))


class Button(Element):
  """A button element."""
  class_type = 'BUTTON'

  def __init__(self, name, value):
    super(Button, self).__init__(Button.class_type,
                                 name=name,
                                 value=value)

  @classmethod
  def from_props(cls, props):
    return Button(name=props.get('name'), value=props.get('value'))


class Label(Element):
  """A label element."""
  class_type = 'LABEL'

  def __init__(self, label_for, caption):
    # Stored under the generic name/value property keys used by the wire
    # format; from_props below maps them back.
    super(Label, self).__init__(Label.class_type,
                                name=label_for,
                                value=caption)

  @classmethod
  def from_props(cls, props):
    return Label(label_for=props.get('name'), caption=props.get('value'))


class RadioButton(Element):
  """A radio button element."""
  class_type = 'RADIO_BUTTON'

  def __init__(self, name, group):
    super(RadioButton, self).__init__(RadioButton.class_type,
                                      name=name,
                                      value=group)

  @classmethod
  def from_props(cls, props):
    return RadioButton(name=props.get('name'), group=props.get('value'))


class RadioButtonGroup(Element):
  """A group of radio buttons."""
  class_type = 'RADIO_BUTTON_GROUP'

  def __init__(self, name, value):
    super(RadioButtonGroup, self).__init__(RadioButtonGroup.class_type,
                                           name=name,
                                           value=value)

  @classmethod
  def from_props(cls, props):
    return RadioButtonGroup(name=props.get('name'), value=props.get('value'))


class Password(Element):
  """A password element."""
  class_type = 'PASSWORD'

  def __init__(self, name, value):
    super(Password, self).__init__(Password.class_type,
                                   name=name,
                                   value=value)

  @classmethod
  def from_props(cls, props):
    return Password(name=props.get('name'), value=props.get('value'))


class TextArea(Element):
  """A text area element."""
  class_type = 'TEXTAREA'

  def __init__(self, name, value):
    super(TextArea, self).__init__(TextArea.class_type,
                                   name=name,
                                   value=value)

  @classmethod
  def from_props(cls, props):
    return TextArea(name=props.get('name'), value=props.get('value'))


class Line(Element):
  """A line element.

  Note that Lines are represented in the text as newlines.
  """
  class_type = 'LINE'

  # Possible line types:
  #: Designates line as H1, largest heading.
  TYPE_H1 = 'h1'
  #: Designates line as H2 heading.
  TYPE_H2 = 'h2'
  #: Designates line as H3 heading.
  TYPE_H3 = 'h3'
  #: Designates line as H4 heading.
  TYPE_H4 = 'h4'
  #: Designates line as H5, smallest heading.
  TYPE_H5 = 'h5'
  #: Designates line as a bulleted list item.
  TYPE_LI = 'li'

  # Possible values for align
  #: Sets line alignment to left.
  ALIGN_LEFT = 'l'
  #: Sets line alignment to right.
  ALIGN_RIGHT = 'r'
  #: Sets line alignment to centered.
  ALIGN_CENTER = 'c'
  #: Sets line alignment to justified.
  ALIGN_JUSTIFIED = 'j'

  def __init__(self, line_type=None, indent=None,
               alignment=None, direction=None):
    super(Line, self).__init__(Line.class_type,
                               lineType=line_type,
                               indent=indent,
                               alignment=alignment,
                               direction=direction)

  @classmethod
  def from_props(cls, props):
    return Line(line_type=props.get('lineType'),
                indent=props.get('indent'),
                alignment=props.get('alignment'),
                direction=props.get('direction'))


class Gadget(Element):
  """A gadget element."""
  class_type = 'GADGET'

  def __init__(self, url, props=None):
    if props is None:
      props = {}
    props['url'] = url
    super(Gadget, self).__init__(Gadget.class_type, properties=props)

  @classmethod
  def from_props(cls, props):
    return Gadget(props.get('url'), props)

  def serialize(self):
    """Gadgets allow for None values."""
    return {'properties': self._properties,
            'type': self._type}

  def keys(self):
    """Get the valid keys for this gadget."""
    return [x for x in self._properties.keys() if x != 'url']


class Installer(Element):
  """An installer element."""
  class_type = 'INSTALLER'

  def __init__(self, manifest):
    super(Installer, self).__init__(Installer.class_type, manifest=manifest)

  @classmethod
  def from_props(cls, props):
    return Installer(props.get('manifest'))


class Image(Element):
  """An image element."""
  class_type = 'IMAGE'

  def __init__(self, url='', width=None, height=None, attachmentId=None,
               caption=None):
    super(Image, self).__init__(Image.class_type, url=url, width=width,
                                height=height, attachmentId=attachmentId,
                                caption=caption)

  @classmethod
  def from_props(cls, props):
    # The JSON parser hands back unicode keys; Python 2 keyword names must
    # be byte strings, so re-encode before expanding them as kwargs.
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    # Direct keyword expansion replaces the deprecated apply() builtin.
    return cls(**props)


class Attachment(Element):
  """An attachment element.

  To create a new attachment, caption and data are needed. mimeType,
  attachmentId and attachmentUrl are sent via events.
  """
  class_type = 'ATTACHMENT'

  def __init__(self, caption=None, data=None, mimeType=None,
               attachmentId=None, attachmentUrl=None):
    # Keep the raw payload on the instance. (It was previously stored in a
    # class attribute, so constructing a second Attachment clobbered the
    # .data of every earlier instance.) A separate copy is needed because
    # serialize() base64-encodes the value in _properties in place, while
    # .data must keep returning the original bytes.
    self._original_data = data
    super(Attachment, self).__init__(Attachment.class_type,
                                     caption=caption,
                                     data=data,
                                     mimeType=mimeType,
                                     attachmentId=attachmentId,
                                     attachmentUrl=attachmentUrl)

  def __getattr__(self, key):
    # 'data' bypasses _properties so callers always see the un-encoded
    # payload even after serialize() ran.
    if key == 'data':
      return self._original_data
    return super(Attachment, self).__getattr__(key)

  @classmethod
  def from_props(cls, props):
    # See Image.from_props for the key re-encoding rationale.
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    return cls(**props)

  def serialize(self):
    """Serializes the attachment object into JSON.

    The attachment data is base64 encoded.
    """
    if self.data:
      self._properties['data'] = base64.encodestring(self.data)
    return super(Attachment, self).serialize()


def is_element(cls):
  """Returns whether the passed object is an element class.

  globals() also contains modules, functions and constants, hence the
  TypeError guard around issubclass.
  """
  try:
    return issubclass(cls, Element) and hasattr(cls, 'class_type')
  except TypeError:
    return False


# Registry mapping wire-format type names to element classes; used by
# Element.from_json to resolve incoming elements.
ALL = dict([(item.class_type, item)
            for item in globals().copy().values() if is_element(item)])
[ [ 8, 0, 0.0504, 0.0109, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0627, 0.0027, 0, 0.66, 0.05, 177, 0, 1, 0, 0, 177, 0, 0 ], [ 1, 0, 0.0654, 0.0027, 0, 0.66, ...
[ "\"\"\"Elements are non-text bits living in blips like images, gadgets etc.\n\nThis module defines the Element class and the derived classes.\n\"\"\"", "import base64", "import logging", "import sys", "import util", "class Element(object):\n \"\"\"Elements are non-text content within a document.\n\n The...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the element module."""

import base64
import unittest

import element
import util


class TestElement(unittest.TestCase):
  """Tests for the element.Element class."""

  def testProperties(self):
    el = element.Element(element.Gadget.class_type, key='value')
    self.assertEquals('value', el.key)

  def testFormElement(self):
    el = element.Input('input')
    self.assertEquals(element.Input.class_type, el.type)
    self.assertEquals(el.value, '')
    self.assertEquals(el.name, 'input')

  def testImage(self):
    image = element.Image('http://test.com/image.png', width=100, height=100)
    self.assertEquals(element.Image.class_type, image.type)
    self.assertEquals(image.url, 'http://test.com/image.png')
    self.assertEquals(image.width, 100)
    self.assertEquals(image.height, 100)

  def testAttachment(self):
    attachment = element.Attachment(caption='My Favorite', data='SomefakeData')
    self.assertEquals(element.Attachment.class_type, attachment.type)
    self.assertEquals(attachment.caption, 'My Favorite')
    self.assertEquals(attachment.data, 'SomefakeData')

  def testGadget(self):
    gadget = element.Gadget('http://test.com/gadget.xml')
    self.assertEquals(element.Gadget.class_type, gadget.type)
    self.assertEquals(gadget.url, 'http://test.com/gadget.xml')

  def testInstaller(self):
    installer = element.Installer('http://test.com/installer.xml')
    self.assertEquals(element.Installer.class_type, installer.type)
    self.assertEquals(installer.manifest, 'http://test.com/installer.xml')

  def testSerialize(self):
    image = element.Image('http://test.com/image.png', width=100, height=100)
    s = util.serialize(image)
    # Only the three properties that were set should be serialized;
    # None-valued ones are dropped.
    props = s['properties']
    self.assertEquals(len(props), 3)
    self.assertEquals(props['url'], 'http://test.com/image.png')
    self.assertEquals(props['width'], 100)
    self.assertEquals(props['height'], 100)

  def testSerializeAttachment(self):
    attachment = element.Attachment(caption='My Favorite', data='SomefakeData')
    s = util.serialize(attachment)
    # Only caption and data were set, so only those two serialize. The data
    # property is base64 encoded on the wire while .data stays raw.
    props = s['properties']
    self.assertEquals(len(props), 2)
    self.assertEquals(props['caption'], 'My Favorite')
    self.assertEquals(props['data'], base64.encodestring('SomefakeData'))
    self.assertEquals(attachment.data, 'SomefakeData')

  def testSerializeLine(self):
    line = element.Line(element.Line.TYPE_H1,
                        alignment=element.Line.ALIGN_LEFT)
    s = util.serialize(line)
    # Only the two properties that were set should be serialized.
    props = s['properties']
    self.assertEquals(len(props), 2)
    self.assertEquals(props['alignment'], 'l')
    self.assertEquals(props['lineType'], 'h1')

  def testSerializeGadget(self):
    gadget = element.Gadget('http://test.com',
                            {'prop1': 'a', 'prop_cap': None})
    s = util.serialize(gadget)
    # Gadgets keep None-valued properties, so all three survive.
    props = s['properties']
    self.assertEquals(len(props), 3)
    self.assertEquals(props['url'], 'http://test.com')
    self.assertEquals(props['prop1'], 'a')
    self.assertEquals(props['prop_cap'], None)

  def testGadgetElementFromJson(self):
    url = 'http://www.foo.com/gadget.xml'
    json = {
        'type': element.Gadget.class_type,
        'properties': {
            'url': url,
        }
    }
    gadget = element.Element.from_json(json)
    self.assertEquals(element.Gadget.class_type, gadget.type)
    self.assertEquals(url, gadget.url)

  def testImageElementFromJson(self):
    url = 'http://www.foo.com/image.png'
    width = '32'
    height = '32'
    attachment_id = '2'
    caption = 'Test Image'
    json = {
        'type': element.Image.class_type,
        'properties': {
            'url': url,
            'width': width,
            'height': height,
            'attachmentId': attachment_id,
            'caption': caption,
        }
    }
    image = element.Element.from_json(json)
    self.assertEquals(element.Image.class_type, image.type)
    self.assertEquals(url, image.url)
    self.assertEquals(width, image.width)
    self.assertEquals(height, image.height)
    self.assertEquals(attachment_id, image.attachmentId)
    self.assertEquals(caption, image.caption)

  def testAttachmentElementFromJson(self):
    caption = 'fake caption'
    data = 'fake data'
    mime_type = 'fake mime'
    attachment_id = 'fake id'
    attachment_url = 'fake URL'
    json = {
        'type': element.Attachment.class_type,
        'properties': {
            'caption': caption,
            'data': data,
            'mimeType': mime_type,
            'attachmentId': attachment_id,
            'attachmentUrl': attachment_url,
        }
    }
    attachment = element.Element.from_json(json)
    self.assertEquals(element.Attachment.class_type, attachment.type)
    self.assertEquals(caption, attachment.caption)
    self.assertEquals(data, attachment.data)
    self.assertEquals(mime_type, attachment.mimeType)
    self.assertEquals(attachment_id, attachment.attachmentId)
    self.assertEquals(attachment_url, attachment.attachmentUrl)

  def testFormElementFromJson(self):
    name = 'button'
    value = 'value'
    default_value = 'foo'
    json = {
        'type': element.Label.class_type,
        'properties': {
            'name': name,
            'value': value,
            'defaultValue': default_value,
        }
    }
    el = element.Element.from_json(json)
    self.assertEquals(element.Label.class_type, el.type)
    self.assertEquals(name, el.name)
    self.assertEquals(value, el.value)

  def testCanInstantiate(self):
    bag = [element.Check(name='check', value='value'),
           element.Button(name='button', value='caption'),
           element.Input(name='input', value='caption'),
           element.Label(label_for='button', caption='caption'),
           element.RadioButton(name='name', group='group'),
           element.RadioButtonGroup(name='name', value='value'),
           element.Password(name='name', value='geheim'),
           element.TextArea(name='name', value='\n\n\n'),
           element.Installer(manifest='test.com/installer.xml'),
           element.Line(line_type='type', indent='3',
                        alignment='r', direction='d'),
           element.Gadget(url='test.com/gadget.xml',
                          props={'key1': 'val1', 'key2': 'val2'}),
           element.Image(url='test.com/image.png', width=100, height=200),
           element.Attachment(caption='fake caption', data='fake data')]
    types_constructed = set([type(x) for x in bag])
    types_required = set(element.ALL.values())
    # Every constructed type must be registered in element.ALL ...
    unregistered = types_constructed.difference(types_required)
    self.assertEquals(unregistered, set())
    # ... and the bag above must cover every registered type.
    uncovered = types_required.difference(types_constructed)
    self.assertEquals(uncovered, set())


if __name__ == '__main__':
  unittest.main()
[ [ 8, 0, 0.0791, 0.0047, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.093, 0.0047, 0, 0.66, 0.1667, 177, 0, 1, 0, 0, 177, 0, 0 ], [ 1, 0, 0.0977, 0.0047, 0, 0.66,...
[ "\"\"\"Unit tests for the element module.\"\"\"", "import base64", "import unittest", "import element", "import util", "class TestElement(unittest.TestCase):\n \"\"\"Tests for the element.Element class.\"\"\"\n\n def testProperties(self):\n el = element.Element(element.Gadget.class_type,\n ...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Defines classes that are needed to model a wavelet."""

import blip
import errors
import util


class DataDocs(object):
  """Class modeling a bunch of data documents in pythonic way.

  Mutations are mirrored to the server through the operation queue in
  addition to updating the local dictionary.
  """

  def __init__(self, init_docs, wave_id, wavelet_id, operation_queue):
    self._docs = init_docs
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __iter__(self):
    return self._docs.__iter__()

  def __contains__(self, key):
    return key in self._docs

  def __delitem__(self, key):
    # Deleting an absent key is a no-op rather than a KeyError.
    if not key in self._docs:
      return
    # Setting a data document to None removes it on the server.
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, None)
    del self._docs[key]

  def __getitem__(self, key):
    return self._docs[key]

  def __setitem__(self, key, value):
    self._operation_queue.wavelet_datadoc_set(
        self._wave_id, self._wavelet_id, key, value)
    # Assigning None doubles as deletion, matching the server semantics.
    if value is None and key in self._docs:
      del self._docs[key]
    else:
      self._docs[key] = value

  def __len__(self):
    return len(self._docs)

  def keys(self):
    return self._docs.keys()

  def serialize(self):
    """Returns a dictionary of the data documents."""
    return self._docs


class Participants(object):
  """Class modelling a set of participants in pythonic way."""

  #: Designates full access (read/write) role.
  ROLE_FULL = "FULL"
  #: Designates read-only role.
  ROLE_READ_ONLY = "READ_ONLY"

  def __init__(self, participants, roles, wave_id, wavelet_id,
               operation_queue):
    self._participants = set(participants)
    self._roles = roles.copy()
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __contains__(self, participant):
    return participant in self._participants

  def __len__(self):
    return len(self._participants)

  def __iter__(self):
    return self._participants.__iter__()

  def add(self, participant_id):
    """Adds a participant by their ID (address)."""
    self._operation_queue.wavelet_add_participant(
        self._wave_id, self._wavelet_id, participant_id)
    self._participants.add(participant_id)

  def get_role(self, participant_id):
    """Return the role for the given participant_id.

    Participants without an explicit role default to ROLE_FULL.
    """
    return self._roles.get(participant_id, Participants.ROLE_FULL)

  def set_role(self, participant_id, role):
    """Sets the role for the given participant_id.

    Raises:
      ValueError: if role is not one of ROLE_FULL or ROLE_READ_ONLY.
    """
    if role != Participants.ROLE_FULL and role != Participants.ROLE_READ_ONLY:
      raise ValueError(role + ' is not a valid role')
    self._operation_queue.wavelet_modify_participant_role(
        self._wave_id, self._wavelet_id, participant_id, role)
    self._roles[participant_id] = role

  def serialize(self):
    """Returns a list of the participants."""
    return list(self._participants)


class Tags(object):
  """Class modelling a list of tags."""

  def __init__(self, tags, wave_id, wavelet_id, operation_queue):
    self._tags = list(tags)
    self._wave_id = wave_id
    self._wavelet_id = wavelet_id
    self._operation_queue = operation_queue

  def __getitem__(self, index):
    return self._tags[index]

  def __len__(self):
    return len(self._tags)

  def __iter__(self):
    return self._tags.__iter__()

  def append(self, tag):
    """Appends a tag if it doesn't already exist."""
    tag = util.force_unicode(tag)
    if tag in self._tags:
      return
    self._operation_queue.wavelet_modify_tag(
        self._wave_id, self._wavelet_id, tag)
    self._tags.append(tag)

  def remove(self, tag):
    """Removes a tag if it exists."""
    tag = util.force_unicode(tag)
    if not tag in self._tags:
      return
    self._operation_queue.wavelet_modify_tag(
        self._wave_id, self._wavelet_id, tag, modify_how='remove')
    self._tags.remove(tag)

  def serialize(self):
    """Returns a list of tags."""
    return list(self._tags)


class Wavelet(object):
  """Models a single wavelet.

  A single wavelet is composed of metadata, participants, and its blips.
  To guarantee that all blips are available, specify Context.ALL for events.
  """

  def __init__(self, json, blips, robot, operation_queue):
    """Inits this wavelet with JSON data.

    Args:
      json: JSON data dictionary from Wave server.
      blips: a dictionary object that can be used to resolve blips.
      robot: the robot owning this wavelet.
      operation_queue: an OperationQueue object to be used to send any
          generated operations to.
    """
    self._robot = robot
    self._operation_queue = operation_queue
    self._wave_id = json.get('waveId')
    self._wavelet_id = json.get('waveletId')
    self._creator = json.get('creator')
    self._creation_time = json.get('creationTime', 0)
    self._data_documents = DataDocs(json.get('dataDocuments', {}),
                                    self._wave_id,
                                    self._wavelet_id,
                                    operation_queue)
    self._last_modified_time = json.get('lastModifiedTime')
    self._participants = Participants(json.get('participants', []),
                                      json.get('participantRoles', {}),
                                      self._wave_id,
                                      self._wavelet_id,
                                      operation_queue)
    self._title = json.get('title', '')
    self._tags = Tags(json.get('tags', []),
                      self._wave_id,
                      self._wavelet_id,
                      operation_queue)
    self._raw_data = json
    self._blips = blip.Blips(blips)
    self._root_blip_id = json.get('rootBlipId')
    # The root blip may not be in the supplied context; fall back to None.
    if self._root_blip_id and self._root_blip_id in self._blips:
      self._root_blip = self._blips[self._root_blip_id]
    else:
      self._root_blip = None
    # Only set for wavelets fetched via the Active Robot API; see
    # robot_address below.
    self._robot_address = None

  @property
  def wavelet_id(self):
    """Returns this wavelet's id."""
    return self._wavelet_id

  @property
  def wave_id(self):
    """Returns this wavelet's parent wave id."""
    return self._wave_id

  @property
  def creator(self):
    """Returns the participant id of the creator of this wavelet."""
    return self._creator

  @property
  def creation_time(self):
    """Returns the time that this wavelet was first created in milliseconds."""
    return self._creation_time

  @property
  def data_documents(self):
    """Returns the data documents for this wavelet based on key name."""
    return self._data_documents

  @property
  def domain(self):
    """Return the domain that wavelet belongs to.

    Extracted from the '<domain>!<id>' form of the wave id; None when the
    wave id has no '!' separator.
    """
    p = self._wave_id.find('!')
    if p == -1:
      return None
    else:
      return self._wave_id[:p]

  @property
  def last_modified_time(self):
    """Returns the time that this wavelet was last modified in ms."""
    return self._last_modified_time

  @property
  def participants(self):
    """Returns a set of participants on this wavelet."""
    return self._participants

  @property
  def tags(self):
    """Returns a list of tags for this wavelet."""
    return self._tags

  @property
  def robot(self):
    """The robot that owns this wavelet."""
    return self._robot

  def _get_title(self):
    return self._title

  def _set_title(self, title):
    # Titles are single-line by contract; a newline would corrupt the
    # root blip content layout below.
    title = util.force_unicode(title)
    if title.find('\n') != -1:
      raise errors.Error('Wavelet title should not contain a newline ' +
                         'character. Specified: ' + title)
    self._operation_queue.wavelet_set_title(self.wave_id, self.wavelet_id,
                                            title)
    self._title = title
    # Adjust the content of the root blip, if it is available in the context.
    # The title occupies the first line of the root blip, so replace
    # everything up to the second newline and keep the remainder.
    if self._root_blip:
      content = '\n'
      splits = self._root_blip._content.split('\n', 2)
      if len(splits) == 3:
        content += splits[2]
      self._root_blip._content = '\n' + title + content

  #: Returns or sets the wavelet's title.
  title = property(_get_title, _set_title,
                   doc='Get or set the title of the wavelet.')

  def _get_robot_address(self):
    return self._robot_address

  def _set_robot_address(self, address):
    # Write-once: the address identifies the robot for proxying and must
    # not silently change.
    if self._robot_address:
      raise errors.Error('robot address already set')
    self._robot_address = address

  robot_address = property(_get_robot_address, _set_robot_address,
                           doc='Get or set the address of the current robot.')

  @property
  def root_blip(self):
    """Returns this wavelet's root blip."""
    return self._root_blip

  @property
  def blips(self):
    """Returns the blips for this wavelet."""
    return self._blips

  def get_operation_queue(self):
    """Returns the OperationQueue for this wavelet."""
    return self._operation_queue

  def serialize(self):
    """Return a dict of the wavelet properties."""
    return {'waveId': self._wave_id,
            'waveletId': self._wavelet_id,
            'creator': self._creator,
            'creationTime': self._creation_time,
            'dataDocuments': self._data_documents.serialize(),
            'lastModifiedTime': self._last_modified_time,
            'participants': self._participants.serialize(),
            'title': self._title,
            'blips': self._blips.serialize(),
            'rootBlipId': self._root_blip_id
           }

  def proxy_for(self, proxy_for_id):
    """Return a view on this wavelet that will proxy for the specified id.

    A shallow copy of the current wavelet is returned with the proxy_for_id
    set. Any modifications made to this copy will be done using the
    proxy_for_id, i.e. the robot+<proxy_for_id>@appspot.com address will
    be used.

    If the wavelet was retrieved using the Active Robot API, that is
    by fetch_wavelet, then the address of the robot must be added to the
    wavelet by setting wavelet.robot_address before calling proxy_for().
    """
    self.add_proxying_participant(proxy_for_id)
    operation_queue = self.get_operation_queue().proxy_for(proxy_for_id)
    # Build an empty wavelet and then share this wavelet's state with it;
    # the copy is shallow on purpose so both views see the same blips,
    # participants etc.
    res = Wavelet(json={},
                  blips={},
                  robot=self.robot,
                  operation_queue=operation_queue)
    res._wave_id = self._wave_id
    res._wavelet_id = self._wavelet_id
    res._creator = self._creator
    res._creation_time = self._creation_time
    res._data_documents = self._data_documents
    res._last_modified_time = self._last_modified_time
    res._participants = self._participants
    res._title = self._title
    res._raw_data = self._raw_data
    res._blips = self._blips
    res._root_blip = self._root_blip
    return res

  def add_proxying_participant(self, id):
    """Adds a proxying participant to the wave.

    Proxying participants are of the form robot+proxy@domain.com. This
    convenience method constructs this id and then calls participants.add.

    Raises:
      errors.Error: if robot_address has not been set on this wavelet.
    """
    if not self.robot_address:
      raise errors.Error(
          'Need a robot address to add a proxying for participant')
    robotid, domain = self.robot_address.split('@', 1)
    # Strip an optional '#version' suffix so it can be re-appended after
    # the '+proxy' part is inserted.
    if '#' in robotid:
      robotid, version = robotid.split('#')
    else:
      version = None
    # Replace any existing '+proxy' part instead of stacking another one.
    if '+' in robotid:
      newid = robotid.split('+', 1)[0] + '+' + id
    else:
      newid = robotid + '+' + id
    if version:
      newid += '#' + version
    newid += '@' + domain
    self.participants.add(newid)

  def submit_with(self, other_wavelet):
    """Submit this wavelet when the passed other wavelet is submitted.

    Wavelets constructed outside of the event callback need to be either
    explicitly submitted using robot.submit(wavelet) or be associated with
    a different wavelet that will be submitted or is part of the event
    callback.
    """
    other_wavelet._operation_queue.copy_operations(self._operation_queue)
    self._operation_queue = other_wavelet._operation_queue

  def reply(self, initial_content=None):
    """Replies to the conversation in this wavelet.

    Args:
      initial_content: If set, start with this (string) content.

    Returns:
      A transient version of the blip that contains the reply.
    """
    if not initial_content:
      initial_content = u'\n'
    initial_content = util.force_unicode(initial_content)
    blip_data = self._operation_queue.wavelet_append_blip(
        self.wave_id, self.wavelet_id, initial_content)
    instance = blip.Blip(blip_data, self._blips, self._operation_queue)
    self._blips._add(instance)
    return instance

  def delete(self, todelete):
    """Remove a blip from this wavelet.

    Args:
      todelete: either a blip or a blip id to be removed.
    """
    if isinstance(todelete, blip.Blip):
      blip_id = todelete.blip_id
    else:
      blip_id = todelete
    self._operation_queue.blip_delete(self.wave_id, self.wavelet_id, blip_id)
    self._blips._remove_with_id(blip_id)
[ [ 8, 0, 0.0407, 0.0024, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0455, 0.0024, 0, 0.66, 0.1429, 134, 0, 1, 0, 0, 134, 0, 0 ], [ 1, 0, 0.0478, 0.0024, 0, 0.66...
[ "\"\"\"Defines classes that are needed to model a wavelet.\"\"\"", "import blip", "import errors", "import util", "class DataDocs(object):\n \"\"\"Class modeling a bunch of data documents in pythonic way.\"\"\"\n\n def __init__(self, init_docs, wave_id, wavelet_id, operation_queue):\n self._docs = init...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Elements are non-text bits living in blips like images, gadgets etc.

This module defines the Element class and the derived classes.
"""

import base64
import logging
import sys

import util


class Element(object):
  """Elements are non-text content within a document.

  These are generally abstracted from the Robot. Although a Robot can query
  the properties of an element it can only interact with the specific types
  that the element represents.

  Properties of elements are both accessible directly (image.url) and through
  the properties dictionary (image.properties['url']). In general Element
  should not be instantiated by robots, but rather rely on the derived
  classes.
  """

  # INLINE_BLIP_TYPE is not a separate type since it shouldn't be
  # instantiated, only be used for introspection.
  INLINE_BLIP_TYPE = "INLINE_BLIP"

  def __init__(self, element_type, **properties):
    """Initializes self with the specified type and any properties.

    Args:
      element_type: string typed member of ELEMENT_TYPE
      properties: either a dictionary of initial properties, or a dictionary
          with just one member properties that is itself a dictionary of
          properties. This allows us to both use
          e = Element(atype, prop1=val1, prop2=prop2...) and
          e = Element(atype, properties={prop1:val1, prop2:prop2..})
    """
    if len(properties) == 1 and 'properties' in properties:
      properties = properties['properties']
    self._type = element_type
    # As long as the operation_queue of an element is None, it is
    # unattached. After an element is acquired by a blip, the blip
    # will set the operation_queue to make sure all changes to the
    # element are properly sent to the server.
    self._operation_queue = None
    self._properties = properties.copy()

  @property
  def type(self):
    """The type of this element."""
    return self._type

  @classmethod
  def from_json(cls, json):
    """Class method to instantiate an Element based on a json string."""
    etype = json['type']
    props = json['properties'].copy()
    element_class = ALL.get(etype)
    if not element_class:
      # Unknown type. Server could be newer than we are.
      return Element(element_type=etype, properties=props)
    return element_class.from_props(props)

  def get(self, key, default=None):
    """Standard get interface."""
    return self._properties.get(key, default)

  def __getattr__(self, key):
    # Expose properties as plain attributes (image.url etc.).
    return self._properties[key]

  def serialize(self):
    """Custom serializer for Elements."""
    return util.serialize({'properties': util.non_none_dict(self._properties),
                           'type': self._type})


class Input(Element):
  """A single-line input element."""
  class_type = 'INPUT'

  def __init__(self, name, value=''):
    super(Input, self).__init__(Input.class_type,
                                name=name,
                                value=value,
                                default_value=value)

  @classmethod
  def from_props(cls, props):
    return Input(name=props.get('name'), value=props.get('value'))


class Check(Element):
  """A checkbox element."""
  class_type = 'CHECK'

  def __init__(self, name, value=''):
    super(Check, self).__init__(Check.class_type,
                                name=name,
                                value=value,
                                default_value=value)

  @classmethod
  def from_props(cls, props):
    return Check(name=props.get('name'), value=props.get('value'))


class Button(Element):
  """A button element."""
  class_type = 'BUTTON'

  def __init__(self, name, value):
    super(Button, self).__init__(Button.class_type,
                                 name=name,
                                 value=value)

  @classmethod
  def from_props(cls, props):
    return Button(name=props.get('name'), value=props.get('value'))


class Label(Element):
  """A label element."""
  class_type = 'LABEL'

  def __init__(self, label_for, caption):
    # Stored under the generic name/value property keys used by the wire
    # format; from_props below maps them back.
    super(Label, self).__init__(Label.class_type,
                                name=label_for,
                                value=caption)

  @classmethod
  def from_props(cls, props):
    return Label(label_for=props.get('name'), caption=props.get('value'))


class RadioButton(Element):
  """A radio button element."""
  class_type = 'RADIO_BUTTON'

  def __init__(self, name, group):
    super(RadioButton, self).__init__(RadioButton.class_type,
                                      name=name,
                                      value=group)

  @classmethod
  def from_props(cls, props):
    return RadioButton(name=props.get('name'), group=props.get('value'))


class RadioButtonGroup(Element):
  """A group of radio buttons."""
  class_type = 'RADIO_BUTTON_GROUP'

  def __init__(self, name, value):
    super(RadioButtonGroup, self).__init__(RadioButtonGroup.class_type,
                                           name=name,
                                           value=value)

  @classmethod
  def from_props(cls, props):
    return RadioButtonGroup(name=props.get('name'), value=props.get('value'))


class Password(Element):
  """A password element."""
  class_type = 'PASSWORD'

  def __init__(self, name, value):
    super(Password, self).__init__(Password.class_type,
                                   name=name,
                                   value=value)

  @classmethod
  def from_props(cls, props):
    return Password(name=props.get('name'), value=props.get('value'))


class TextArea(Element):
  """A text area element."""
  class_type = 'TEXTAREA'

  def __init__(self, name, value):
    super(TextArea, self).__init__(TextArea.class_type,
                                   name=name,
                                   value=value)

  @classmethod
  def from_props(cls, props):
    return TextArea(name=props.get('name'), value=props.get('value'))


class Line(Element):
  """A line element.

  Note that Lines are represented in the text as newlines.
  """
  class_type = 'LINE'

  # Possible line types:
  #: Designates line as H1, largest heading.
  TYPE_H1 = 'h1'
  #: Designates line as H2 heading.
  TYPE_H2 = 'h2'
  #: Designates line as H3 heading.
  TYPE_H3 = 'h3'
  #: Designates line as H4 heading.
  TYPE_H4 = 'h4'
  #: Designates line as H5, smallest heading.
  TYPE_H5 = 'h5'
  #: Designates line as a bulleted list item.
  TYPE_LI = 'li'

  # Possible values for align
  #: Sets line alignment to left.
  ALIGN_LEFT = 'l'
  #: Sets line alignment to right.
  ALIGN_RIGHT = 'r'
  #: Sets line alignment to centered.
  ALIGN_CENTER = 'c'
  #: Sets line alignment to justified.
  ALIGN_JUSTIFIED = 'j'

  def __init__(self, line_type=None, indent=None,
               alignment=None, direction=None):
    super(Line, self).__init__(Line.class_type,
                               lineType=line_type,
                               indent=indent,
                               alignment=alignment,
                               direction=direction)

  @classmethod
  def from_props(cls, props):
    return Line(line_type=props.get('lineType'),
                indent=props.get('indent'),
                alignment=props.get('alignment'),
                direction=props.get('direction'))


class Gadget(Element):
  """A gadget element."""
  class_type = 'GADGET'

  def __init__(self, url, props=None):
    if props is None:
      props = {}
    props['url'] = url
    super(Gadget, self).__init__(Gadget.class_type, properties=props)

  @classmethod
  def from_props(cls, props):
    return Gadget(props.get('url'), props)

  def serialize(self):
    """Gadgets allow for None values."""
    return {'properties': self._properties,
            'type': self._type}

  def keys(self):
    """Get the valid keys for this gadget."""
    return [x for x in self._properties.keys() if x != 'url']


class Installer(Element):
  """An installer element."""
  class_type = 'INSTALLER'

  def __init__(self, manifest):
    super(Installer, self).__init__(Installer.class_type, manifest=manifest)

  @classmethod
  def from_props(cls, props):
    return Installer(props.get('manifest'))


class Image(Element):
  """An image element."""
  class_type = 'IMAGE'

  def __init__(self, url='', width=None, height=None, attachmentId=None,
               caption=None):
    super(Image, self).__init__(Image.class_type, url=url, width=width,
                                height=height, attachmentId=attachmentId,
                                caption=caption)

  @classmethod
  def from_props(cls, props):
    # The JSON parser hands back unicode keys; Python 2 keyword names must
    # be byte strings, so re-encode before expanding them as kwargs.
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    # Direct keyword expansion replaces the deprecated apply() builtin.
    return cls(**props)


class Attachment(Element):
  """An attachment element.

  To create a new attachment, caption and data are needed. mimeType,
  attachmentId and attachmentUrl are sent via events.
  """
  class_type = 'ATTACHMENT'

  def __init__(self, caption=None, data=None, mimeType=None,
               attachmentId=None, attachmentUrl=None):
    # Keep the raw payload on the instance. (It was previously stored in a
    # class attribute, so constructing a second Attachment clobbered the
    # .data of every earlier instance.) A separate copy is needed because
    # serialize() base64-encodes the value in _properties in place, while
    # .data must keep returning the original bytes.
    self._original_data = data
    super(Attachment, self).__init__(Attachment.class_type,
                                     caption=caption,
                                     data=data,
                                     mimeType=mimeType,
                                     attachmentId=attachmentId,
                                     attachmentUrl=attachmentUrl)

  def __getattr__(self, key):
    # 'data' bypasses _properties so callers always see the un-encoded
    # payload even after serialize() ran.
    if key == 'data':
      return self._original_data
    return super(Attachment, self).__getattr__(key)

  @classmethod
  def from_props(cls, props):
    # See Image.from_props for the key re-encoding rationale.
    props = dict([(key.encode('utf-8'), value)
                  for key, value in props.items()])
    return cls(**props)

  def serialize(self):
    """Serializes the attachment object into JSON.

    The attachment data is base64 encoded.
    """
    if self.data:
      self._properties['data'] = base64.encodestring(self.data)
    return super(Attachment, self).serialize()


def is_element(cls):
  """Returns whether the passed object is an element class.

  globals() also contains modules, functions and constants, hence the
  TypeError guard around issubclass.
  """
  try:
    return issubclass(cls, Element) and hasattr(cls, 'class_type')
  except TypeError:
    return False


# Registry mapping wire-format type names to element classes; used by
# Element.from_json to resolve incoming elements.
ALL = dict([(item.class_type, item)
            for item in globals().copy().values() if is_element(item)])
[ [ 8, 0, 0.0504, 0.0109, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0627, 0.0027, 0, 0.66, 0.05, 177, 0, 1, 0, 0, 177, 0, 0 ], [ 1, 0, 0.0654, 0.0027, 0, 0.66, ...
[ "\"\"\"Elements are non-text bits living in blips like images, gadgets etc.\n\nThis module defines the Element class and the derived classes.\n\"\"\"", "import base64", "import logging", "import sys", "import util", "class Element(object):\n \"\"\"Elements are non-text content within a document.\n\n The...
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script to run all unit tests in this package.""" import blip_test import element_test import module_test_runner import ops_test import robot_test import util_test import wavelet_test def RunUnitTests(): """Runs all registered unit tests.""" test_runner = module_test_runner.ModuleTestRunner() test_runner.modules = [ blip_test, element_test, ops_test, robot_test, util_test, wavelet_test, ] test_runner.RunAllTests() if __name__ == "__main__": RunUnitTests()
[ [ 8, 0, 0.3864, 0.0227, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.4545, 0.0227, 0, 0.66, 0.1111, 740, 0, 1, 0, 0, 740, 0, 0 ], [ 1, 0, 0.4773, 0.0227, 0, 0.66...
[ "\"\"\"Script to run all unit tests in this package.\"\"\"", "import blip_test", "import element_test", "import module_test_runner", "import ops_test", "import robot_test", "import util_test", "import wavelet_test", "def RunUnitTests():\n \"\"\"Runs all registered unit tests.\"\"\"\n test_runner =...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A module to run wave robots on app engine."""

import logging
import sys

import events

from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app


class CapabilitiesHandler(webapp.RequestHandler):
  """Handler to forward a request to a handler of a robot."""

  def __init__(self, method, contenttype):
    """Initializes this handler with a specific robot.

    Args:
      method: zero-argument callable producing the response body.
      contenttype: value for the Content-Type response header.
    """
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET request."""
    self.response.headers['Content-Type'] = self._contenttype
    self.response.out.write(self._method())


class ProfileHandler(webapp.RequestHandler):
  """Handler to forward a request to a handler of a robot."""

  def __init__(self, method, contenttype):
    """Initializes this handler with a specific robot.

    Args:
      method: callable producing the profile body; accepts an optional
          participant name for proxied profiles.
      contenttype: value for the Content-Type response header.
    """
    self._method = method
    self._contenttype = contenttype

  def get(self):
    """Handles HTTP GET request."""
    self.response.headers['Content-Type'] = self._contenttype
    # Respond with proxied profile if name specified
    if self.request.get('name'):
      self.response.out.write(self._method(self.request.get('name')))
    else:
      self.response.out.write(self._method())


class RobotEventHandler(webapp.RequestHandler):
  """Handler for the dispatching of events to various handlers to a robot.

  This handler only responds to post events with a JSON post body. Its primary
  task is to separate out the context data from the events in the post body
  and dispatch all events in order. Once all events have been dispatched
  it serializes the context data and its associated operations as a response.
  """

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles the get event for debugging.

    This is useful for debugging but since event bundles tend to be
    rather big it often won't fit for more complex requests.
    """
    ops = self.request.get('events')
    if ops:
      # Replay the supplied event JSON through the regular POST path.
      # BUG FIX: the original assigned the imported ``events`` module here
      # instead of the ``ops`` request parameter.
      self.request.body = ops
      self.post()

  def post(self):
    """Handles HTTP POST requests."""
    json_body = self.request.body
    if not json_body:
      # TODO(davidbyttow): Log error?
      return

    # Redirect stdout to stderr while executing handlers. This way, any stray
    # "print" statements in bot code go to the error logs instead of breaking
    # the JSON response sent to the HTTP channel. Restore it in a finally so
    # a failing handler cannot leave stdout redirected for later requests.
    saved_stdout, sys.stdout = sys.stdout, sys.stderr
    try:
      json_body = unicode(json_body, 'utf8')
      logging.info('Incoming: %s', json_body)
      json_response = self._robot.process_events(json_body)
      logging.info('Outgoing: %s', json_response)
    finally:
      sys.stdout = saved_stdout

    # Build the response.
    self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
    self.response.out.write(json_response.encode('utf-8'))


def operation_error_handler(event, wavelet):
  """Default operation error handler, logging what went wrong."""
  if isinstance(event, events.OperationError):
    logging.error('Previously operation failed: id=%s, message: %s',
                  event.operation_id, event.error_message)


def appengine_post(url, data, headers):
  """POST ``data`` to ``url`` via App Engine urlfetch; returns (status, body)."""
  result = urlfetch.fetch(method='POST',
                          url=url,
                          payload=data,
                          headers=headers,
                          deadline=10)
  return result.status_code, result.content


class RobotVerifyTokenHandler(webapp.RequestHandler):
  """Handler for the token_verify request."""

  def __init__(self, robot):
    """Initializes self with a specific robot."""
    self._robot = robot

  def get(self):
    """Handles the get event for debugging. Ops usually too long."""
    token, st = self._robot.get_verification_token_info()
    logging.info('token=%s', token)
    if token is None:
      self.error(404)
      self.response.out.write('No token set')
      return
    if st is not None:
      if self.request.get('st') != st:
        self.response.out.write('Invalid st value passed %s != %s'
                                % (st, self.request.get('st')))
        return
    self.response.out.write(token)


def create_robot_webapp(robot, debug=False, extra_handlers=None):
  """Returns an instance of webapp.WSGIApplication with robot handlers."""
  if not extra_handlers:
    extra_handlers = []
  return webapp.WSGIApplication([('.*/_wave/capabilities.xml',
                                  lambda: CapabilitiesHandler(
                                      robot.capabilities_xml,
                                      'application/xml')),
                                 ('.*/_wave/robot/profile',
                                  lambda: ProfileHandler(
                                      robot.profile_json,
                                      'application/json')),
                                 ('.*/_wave/robot/jsonrpc',
                                  lambda: RobotEventHandler(robot)),
                                 ('.*/_wave/verify_token',
                                  lambda: RobotVerifyTokenHandler(robot)),
                                ] + extra_handlers,
                                debug=debug)


def run(robot, debug=False, log_errors=True, extra_handlers=None):
  """Sets up the webapp handlers for this robot and starts listening.

  A robot is typically setup in the following steps:
    1. Instantiate and define robot.
    2. Register various handlers that it is interested in.
    3. Call Run, which will setup the handlers for the app.

  For example:
    robot = Robot('Terminator',
                  image_url='http://www.sky.net/models/t800.png',
                  profile_url='http://www.sky.net/models/t800.html')
    robot.register_handler(WAVELET_PARTICIPANTS_CHANGED, KillParticipant)
    run(robot)

  Args:
    robot: the robot to run. This robot is modified to use app engines
        urlfetch for posting http.
    debug: Optional variable that defaults to False and is passed through
        to the webapp application to determine if it should show debug info.
    log_errors: Optional flag that defaults to True and determines whether
        a default handlers to catch errors should be setup that uses the
        app engine logging to log errors.
    extra_handlers: Optional list of tuples that are passed to the webapp
        to install more handlers. For example, passing
        [('/about', AboutHandler),] would install an extra about handler
        for the robot.
  """
  # App Engine expects to construct a class with no arguments, so we
  # pass a lambda that constructs the appropriate handler with
  # arguments from the enclosing scope.
  if log_errors:
    robot.register_handler(events.OperationError, operation_error_handler)
  robot.http_post = appengine_post
  app = create_robot_webapp(robot, debug, extra_handlers)
  run_wsgi_app(app)
[ [ 8, 0, 0.0846, 0.005, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0995, 0.005, 0, 0.66, 0.0714, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.1045, 0.005, 0, 0.66, ...
[ "\"\"\"A module to run wave robots on app engine.\"\"\"", "import logging", "import sys", "import events", "from google.appengine.api import urlfetch", "from google.appengine.ext import webapp", "from google.appengine.ext.webapp.util import run_wsgi_app", "class CapabilitiesHandler(webapp.RequestHandl...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the blip module."""

import unittest

import blip
import element
import ops
import simplejson

# Baseline blip payload used by every test; individual tests override
# fields via new_blip(**args). Content is '\nhello world!\nanother line',
# with a GADGET element at text offset 14.
TEST_BLIP_DATA = {
    'childBlipIds': [],
    'content': '\nhello world!\nanother line',
    'contributors': ['robot@test.com', 'user@test.com'],
    'creator': 'user@test.com',
    'lastModifiedTime': 1000,
    'parentBlipId': None,
    'annotations': [{'range': {'start': 2, 'end': 3},
                     'name': 'key',
                     'value': 'val'}],
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    'elements': {'14': {'type': 'GADGET',
                        'properties': {'url': 'http://a/b.xml'}}},
}

CHILD_BLIP_ID = 'b+42'
ROOT_BLIP_ID = 'b+43'


class TestBlip(unittest.TestCase):
  """Tests the primary data structures for the wave model."""

  def assertBlipStartswith(self, expected, totest):
    # Helper: asserts that the blip's text begins with `expected`.
    actual = totest.text[:len(expected)]
    self.assertEquals(expected, actual)

  def new_blip(self, **args):
    """Create a blip for testing."""
    data = TEST_BLIP_DATA.copy()
    data.update(args)
    res = blip.Blip(data, self.all_blips, self.operation_queue)
    self.all_blips[res.blip_id] = res
    return res

  def setUp(self):
    self.all_blips = {}
    self.operation_queue = ops.OperationQueue()

  def testBlipProperties(self):
    """Each accessor reflects the corresponding TEST_BLIP_DATA field."""
    root = self.new_blip(blipId=ROOT_BLIP_ID, childBlipIds=[CHILD_BLIP_ID])
    child = self.new_blip(blipId=CHILD_BLIP_ID, parentBlipId=ROOT_BLIP_ID)
    self.assertEquals(ROOT_BLIP_ID, root.blip_id)
    self.assertEquals(set([CHILD_BLIP_ID]), root.child_blip_ids)
    self.assertEquals(set(TEST_BLIP_DATA['contributors']), root.contributors)
    self.assertEquals(TEST_BLIP_DATA['creator'], root.creator)
    self.assertEquals(TEST_BLIP_DATA['content'], root.text)
    self.assertEquals(TEST_BLIP_DATA['lastModifiedTime'],
                      root.last_modified_time)
    self.assertEquals(TEST_BLIP_DATA['parentBlipId'], root.parent_blip_id)
    self.assertEquals(TEST_BLIP_DATA['waveId'], root.wave_id)
    self.assertEquals(TEST_BLIP_DATA['waveletId'], root.wavelet_id)
    self.assertEquals(TEST_BLIP_DATA['content'][3], root[3])
    self.assertEquals(element.Gadget.class_type, root[14].type)
    self.assertEquals('http://a/b.xml', root[14].url)
    self.assertEquals('a', root.text[14])
    self.assertEquals(len(TEST_BLIP_DATA['content']), len(root))
    self.assertTrue(root.is_root())
    self.assertFalse(child.is_root())
    self.assertEquals(root, child.parent_blip)

  def testBlipSerialize(self):
    """A serialize/deserialize round trip preserves all blip fields."""
    root = self.new_blip(blipId=ROOT_BLIP_ID, childBlipIds=[CHILD_BLIP_ID])
    serialized = root.serialize()
    unserialized = blip.Blip(serialized, self.all_blips, self.operation_queue)
    self.assertEquals(root.blip_id, unserialized.blip_id)
    self.assertEquals(root.child_blip_ids, unserialized.child_blip_ids)
    self.assertEquals(root.contributors, unserialized.contributors)
    self.assertEquals(root.creator, unserialized.creator)
    self.assertEquals(root.text, unserialized.text)
    self.assertEquals(root.last_modified_time,
                      unserialized.last_modified_time)
    self.assertEquals(root.parent_blip_id, unserialized.parent_blip_id)
    self.assertEquals(root.wave_id, unserialized.wave_id)
    self.assertEquals(root.wavelet_id, unserialized.wavelet_id)
    self.assertTrue(unserialized.is_root())

  def testDocumentOperations(self):
    """find/replace/delete/insert mutate the local text view."""
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    newlines = [x for x in blip.find('\n')]
    self.assertEquals(2, len(newlines))
    blip.first('world').replace('jupiter')
    bits = blip.text.split('\n')
    self.assertEquals(3, len(bits))
    self.assertEquals('hello jupiter!', bits[1])
    blip.range(2, 5).delete()
    self.assertBlipStartswith('\nho jupiter', blip)
    blip.first('ho').insert_after('la')
    self.assertBlipStartswith('\nhola jupiter', blip)
    blip.at(3).insert(' ')
    self.assertBlipStartswith('\nho la jupiter', blip)

  def testElementHandling(self):
    """Elements keep their position across inserts, deletes and replaces."""
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    url = 'http://www.test.com/image.png'
    org_len = len(blip)
    blip.append(element.Image(url=url))
    elems = [elem for elem in blip.find(element.Image, url=url)]
    self.assertEquals(1, len(elems))
    elem = elems[0]
    self.assertTrue(isinstance(elem, element.Image))
    blip.at(1).insert('twelve chars')
    self.assertTrue(blip.text.startswith('\ntwelve charshello'))
    elem = blip[org_len + 12].value()
    self.assertTrue(isinstance(elem, element.Image))
    blip.first('twelve ').delete()
    self.assertTrue(blip.text.startswith('\nchars'))
    elem = blip[org_len + 12 - len('twelve ')].value()
    self.assertTrue(isinstance(elem, element.Image))
    blip.first('chars').replace(element.Image(url=url))
    elems = [elem for elem in blip.find(element.Image, url=url)]
    self.assertEquals(2, len(elems))
    self.assertTrue(blip.text.startswith('\n hello'))
    elem = blip[1].value()
    self.assertTrue(isinstance(elem, element.Image))

  def testAnnotationHandling(self):
    """Annotations merge, clip and split as ranges are (un)annotated."""
    key = 'style/fontWeight'
    def get_bold():
      # Returns the first 'bold' annotation under `key`, or None.
      for an in blip.annotations[key]:
        if an.value == 'bold':
          return an
      return None
    json = ('[{"range":{"start":3,"end":6},"name":"%s","value":"bold"}]'
            % key)
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json))
    self.assertEquals(1, len(blip.annotations))
    self.assertNotEqual(None, get_bold().value)
    self.assertTrue(key in blip.annotations)
    # extend the bold annotation by adding:
    blip.range(5, 8).annotate(key, 'bold')
    self.assertEquals(1, len(blip.annotations))
    self.assertEquals(8, get_bold().end)
    # clip by adding a same keyed:
    blip[4:12].annotate(key, 'italic')
    self.assertEquals(2, len(blip.annotations[key]))
    self.assertEquals(4, get_bold().end)
    # now split the italic one:
    blip.range(6, 7).clear_annotation(key)
    self.assertEquals(3, len(blip.annotations[key]))
    # test names and iteration
    self.assertEquals(1, len(blip.annotations.names()))
    self.assertEquals(3, len([x for x in blip.annotations]))
    blip[3: 5].annotate('foo', 'bar')
    self.assertEquals(2, len(blip.annotations.names()))
    self.assertEquals(4, len([x for x in blip.annotations]))
    blip[3: 5].clear_annotation('foo')
    # clear the whole thing
    blip.all().clear_annotation(key)
    # getting to the key should now throw an exception
    self.assertRaises(KeyError, blip.annotations.__getitem__, key)

  def testBlipOperations(self):
    """reply() and insert_inline_blip() register new blips locally."""
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEquals(1, len(self.all_blips))
    otherblip = blip.reply()
    otherblip.append('hello world')
    self.assertEquals('hello world', otherblip.text)
    self.assertEquals(blip.blip_id, otherblip.parent_blip_id)
    self.assertEquals(2, len(self.all_blips))
    inline = blip.insert_inline_blip(3)
    self.assertEquals(blip.blip_id, inline.parent_blip_id)
    self.assertEquals(3, len(self.all_blips))

  def testInsertInlineBlipCantInsertAtTheBeginning(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEquals(1, len(self.all_blips))
    # Position 0 is invalid for inline blips; no blip must be created.
    self.assertRaises(IndexError, blip.insert_inline_blip, 0)
    self.assertEquals(1, len(self.all_blips))

  def testDocumentModify(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    blip.all().replace('a text with text and then some text')
    blip[7].insert('text ')
    blip.all('text').replace('thing')
    self.assertEquals('a thing thing with thing and then some thing',
                      blip.text)

  def testIteration(self):
    """Iterating all('aaa') yields every match in ascending order."""
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    blip.all().replace('aaa 012 aaa 345 aaa 322')
    count = 0
    prev = -1
    for start, end in blip.all('aaa'):
      count += 1
      self.assertTrue(prev < start)
      prev = start
    self.assertEquals(3, count)

  def testBlipRefValue(self):
    """Index/slice refs mirror text edits and element updates."""
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    content = blip.text
    content = content[:4] + content[5:]
    del blip[4]
    self.assertEquals(content, blip.text)
    content = content[:2] + content[3:]
    del blip[2:3]
    self.assertEquals(content, blip.text)
    blip[2:3] = 'bike'
    content = content[:2] + 'bike' + content[3:]
    self.assertEquals(content, blip.text)
    url = 'http://www.test.com/image.png'
    blip.append(element.Image(url=url))
    self.assertEqual(url, blip.first(element.Image).url)
    url2 = 'http://www.test.com/another.png'
    blip[-1].update_element({'url': url2})
    self.assertEqual(url2, blip.first(element.Image).url)
    self.assertTrue(blip[3:5] == blip.text[3:5])
    blip.append('geheim')
    self.assertTrue(blip.first('geheim'))
    self.assertFalse(blip.first(element.Button))
    blip.append(element.Button(name='test1', value='Click'))
    button = blip.first(element.Button)
    button.update_element({'name': 'test2'})
    self.assertEqual('test2', button.name)

  def testReplace(self):
    # Replacing a pattern that does not occur must leave the text intact.
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    blip.all().replace('\nxxxx')
    blip.all('yyy').replace('zzz')
    self.assertEqual('\nxxxx', blip.text)

  def testDeleteRangeThatSpansAcrossAnnotationEndPoint(self):
    json = ('[{"range":{"start":1,"end":3},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.range(2, 4).delete()
    self.assertEqual('\nF bar.', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(2, blip.annotations['style'][0].end)

  def testInsertBeforeAnnotationStartPoint(self):
    json = ('[{"range":{"start":4,"end":9},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.at(4).insert('d and')
    self.assertEqual('\nFood and bar.', blip.text)
    self.assertEqual(9, blip.annotations['style'][0].start)
    self.assertEqual(14, blip.annotations['style'][0].end)

  def testDeleteRangeInsideAnnotation(self):
    json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.range(2, 4).delete()
    self.assertEqual('\nF bar.', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(3, blip.annotations['style'][0].end)

  def testReplaceInsideAnnotation(self):
    json = ('[{"range":{"start":1,"end":5},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.range(2, 4).replace('ooo')
    self.assertEqual('\nFooo bar.', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(6, blip.annotations['style'][0].end)
    blip.range(2, 5).replace('o')
    self.assertEqual('\nFo bar.', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(4, blip.annotations['style'][0].end)

  def testReplaceSpanAnnotation(self):
    json = ('[{"range":{"start":1,"end":4},"name":"style","value":"bold"}]')
    blip = self.new_blip(blipId=ROOT_BLIP_ID,
                         annotations=simplejson.loads(json),
                         content='\nFoo bar.')
    blip.range(2, 9).replace('')
    self.assertEqual('\nF', blip.text)
    self.assertEqual(1, blip.annotations['style'][0].start)
    self.assertEqual(2, blip.annotations['style'][0].end)

  def testSearchWithNoMatchShouldNotGenerateOperation(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID)
    self.assertEqual(-1, blip.text.find(':('))
    self.assertEqual(0, len(self.operation_queue))
    blip.all(':(').replace(':)')
    self.assertEqual(0, len(self.operation_queue))

  def testBlipsRemoveWithId(self):
    """Removing a child also unlinks it from the parent's child ids."""
    blip_dict = {
        ROOT_BLIP_ID: self.new_blip(blipId=ROOT_BLIP_ID,
                                    childBlipIds=[CHILD_BLIP_ID]),
        CHILD_BLIP_ID: self.new_blip(blipId=CHILD_BLIP_ID,
                                     parentBlipId=ROOT_BLIP_ID)
    }
    blips = blip.Blips(blip_dict)
    blips._remove_with_id(CHILD_BLIP_ID)
    self.assertEqual(1, len(blips))
    self.assertEqual(0, len(blips[ROOT_BLIP_ID].child_blip_ids))

  def testAppendMarkup(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID, content='\nFoo bar.')
    markup = '<p><span>markup<span> content</p>'
    blip.append_markup(markup)
    self.assertEqual(1, len(self.operation_queue))
    self.assertEqual('\nFoo bar.\nmarkup content', blip.text)

  def testBundledAnnotations(self):
    blip = self.new_blip(blipId=ROOT_BLIP_ID, content='\nFoo bar.')
    blip.append('not bold')
    blip.append('bold', bundled_annotations=[('style/fontWeight', 'bold')])
    self.assertEqual(2, len(blip.annotations))
    self.assertEqual('bold', blip.annotations['style/fontWeight'][0].value)

  def testInlineBlipOffset(self):
    offset = 14
    self.new_blip(blipId=ROOT_BLIP_ID,
                  childBlipIds=[CHILD_BLIP_ID],
                  elements={str(offset):
                            {'type': element.Element.INLINE_BLIP_TYPE,
                             'properties': {'id': CHILD_BLIP_ID}}})
    child = self.new_blip(blipId=CHILD_BLIP_ID, parentBlipId=ROOT_BLIP_ID)
    self.assertEqual(offset, child.inline_blip_offset)


if __name__ == '__main__':
  unittest.main()
[ [ 8, 0, 0.0455, 0.0027, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0535, 0.0027, 0, 0.66, 0.1, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.0588, 0.0027, 0, 0.66, ...
[ "\"\"\"Unit tests for the blip module.\"\"\"", "import unittest", "import blip", "import element", "import ops", "import simplejson", "TEST_BLIP_DATA = {\n 'childBlipIds': [],\n 'content': '\\nhello world!\\nanother line',\n 'contributors': ['robot@test.com', 'user@test.com'],\n 'creator': '...
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Support for operations that can be applied to the server.

Contains classes and utilities for creating operations that are to be
applied on the server.
"""

import errors
import random
import util
import sys

PROTOCOL_VERSION = '0.21'

# Operation Types
WAVELET_APPEND_BLIP = 'wavelet.appendBlip'
WAVELET_SET_TITLE = 'wavelet.setTitle'
WAVELET_ADD_PARTICIPANT = 'wavelet.participant.add'
WAVELET_DATADOC_SET = 'wavelet.datadoc.set'
WAVELET_MODIFY_TAG = 'wavelet.modifyTag'
WAVELET_MODIFY_PARTICIPANT_ROLE = 'wavelet.modifyParticipantRole'

BLIP_CREATE_CHILD = 'blip.createChild'
BLIP_DELETE = 'blip.delete'

DOCUMENT_APPEND_MARKUP = 'document.appendMarkup'
DOCUMENT_INLINE_BLIP_INSERT = 'document.inlineBlip.insert'
DOCUMENT_MODIFY = 'document.modify'

ROBOT_CREATE_WAVELET = 'robot.createWavelet'
ROBOT_FETCH_WAVE = 'robot.fetchWave'
ROBOT_NOTIFY_CAPABILITIES_HASH = 'robot.notifyCapabilitiesHash'


class Operation(object):
  """Represents a generic operation applied on the server.

  This operation class contains data that is filled in depending on the
  operation type.

  It can be used directly, but doing so will not result
  in local, transient reflection of state on the blips. In other words,
  creating a 'delete blip' operation will not remove the blip from the local
  context for the duration of this session. It is better to use the OpBased
  model classes directly instead.
  """

  def __init__(self, method, opid, params):
    """Initializes this operation with contextual data.

    Args:
      method: Method to call or type of operation.
      opid: The id of the operation. Any callbacks will refer to these.
      params: An operation type dependent dictionary
    """
    self.method = method
    self.id = opid
    self.params = params

  def __str__(self):
    return '%s[%s]%s' % (self.method, self.id, str(self.params))

  def set_param(self, param, value):
    # Stores a parameter and returns self to allow chaining.
    self.params[param] = value
    return self

  def serialize(self, method_prefix=''):
    """Serialize the operation.

    Args:
      method_prefix: prefixed for each method name to allow for specifying
      a namespace.

    Returns:
      a dict representation of the operation.
    """
    if method_prefix and not method_prefix.endswith('.'):
      method_prefix += '.'
    return {'method': method_prefix + self.method,
            'id': self.id,
            'params': util.serialize(self.params)}

  def set_optional(self, param, value):
    """Sets an optional parameter.

    If value is None or "", this is a no op. Otherwise it calls
    set_param.
    """
    if value == '' or value is None:
      return self
    else:
      return self.set_param(param, value)


class OperationQueue(object):
  """Wraps the queuing of operations using easily callable functions.

  The operation queue wraps single operations as functions and queues the
  resulting operations in-order. Typically there shouldn't be a need to
  call this directly unless operations are needed on entities outside
  of the scope of the robot. For example, to modify a blip that
  does not exist in the current context, you might specify the wave, wavelet
  and blip id to generate an operation.

  Any calls to this will not be reflected in the robot in any way.
  For example, calling wavelet_append_blip will not result in a new blip
  being added to the robot, only an operation to be applied on the
  server.
  """

  # Some class global counters:
  _next_operation_id = 1

  def __init__(self, proxy_for_id=None):
    self.__pending = []
    self._capability_hash = 0
    self._proxy_for_id = proxy_for_id

  def _new_blipdata(self, wave_id, wavelet_id, initial_content='',
                    parent_blip_id=None):
    """Creates JSON of the blip used for this session."""
    # Blip ids are assigned server side; use a random placeholder until then.
    temp_blip_id = 'TBD_%s_%s' % (wavelet_id,
                                  hex(random.randint(0, sys.maxint)))
    return {'waveId': wave_id,
            'waveletId': wavelet_id,
            'blipId': temp_blip_id,
            'content': initial_content,
            'parentBlipId': parent_blip_id}

  def _new_waveletdata(self, domain, participants):
    """Creates an ephemeral WaveletData instance used for this session.

    Args:
      domain: the domain to create the data for.
      participants initially on the wavelet

    Returns:
      Blipdata (for the rootblip), WaveletData.
    """
    wave_id = domain + '!TBD_%s' % hex(random.randint(0, sys.maxint))
    wavelet_id = domain + '!conv+root'
    root_blip_data = self._new_blipdata(wave_id, wavelet_id)
    participants = set(participants)
    wavelet_data = {'waveId': wave_id,
                    'waveletId': wavelet_id,
                    'rootBlipId': root_blip_data['blipId'],
                    'participants': participants}
    return root_blip_data, wavelet_data

  def __len__(self):
    return len(self.__pending)

  def __iter__(self):
    return self.__pending.__iter__()

  def clear(self):
    self.__pending = []

  def proxy_for(self, proxy):
    """Return a view of this operation queue with the proxying for set to proxy.

    This method returns a new instance of an operation queue that shares the
    operation list, but has a different proxying_for_id set so the robot using
    this new queue will send out operations with the proxying_for field set.
    """
    # The new queue aliases self.__pending, so ops queued through either
    # instance end up in the same shared list.
    res = OperationQueue()
    res.__pending = self.__pending
    res._capability_hash = self._capability_hash
    res._proxy_for_id = proxy
    return res

  def set_capability_hash(self, capability_hash):
    self._capability_hash = capability_hash

  def serialize(self):
    # A notify-capabilities op is always prepended so the server can detect
    # robots running with a stale capabilities.xml.
    first = Operation(ROBOT_NOTIFY_CAPABILITIES_HASH,
                      '0',
                      {'capabilitiesHash': self._capability_hash,
                       'protocolVersion': PROTOCOL_VERSION})
    operations = [first] + self.__pending
    res = util.serialize(operations)
    return res

  def copy_operations(self, other_queue):
    """Copy the pending operations from other_queue into this one."""
    for op in other_queue:
      self.__pending.append(op)

  def new_operation(self, method, wave_id, wavelet_id, props=None, **kwprops):
    """Creates and adds a new operation to the operation list."""
    if props is None:
      props = {}
    props.update(kwprops)
    props['waveId'] = wave_id
    props['waveletId'] = wavelet_id
    if self._proxy_for_id:
      props['proxyingFor'] = self._proxy_for_id
    operation = Operation(method,
                          'op%s' % OperationQueue._next_operation_id,
                          props)
    self.__pending.append(operation)
    OperationQueue._next_operation_id += 1
    return operation

  def wavelet_append_blip(self, wave_id, wavelet_id, initial_content=''):
    """Appends a blip to a wavelet.

    Args:
      wave_id: The wave id owning the containing wavelet.
      wavelet_id: The wavelet id that this blip should be appended to.
      initial_content: optionally the content to start with

    Returns:
      JSON representing the information of the new blip.
    """
    blip_data = self._new_blipdata(wave_id, wavelet_id, initial_content)
    self.new_operation(WAVELET_APPEND_BLIP, wave_id,
                       wavelet_id, blipData=blip_data)
    return blip_data

  def wavelet_add_participant(self, wave_id, wavelet_id, participant_id):
    """Adds a participant to a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to add.

    Returns:
      data for the root_blip, wavelet
    """
    return self.new_operation(WAVELET_ADD_PARTICIPANT, wave_id, wavelet_id,
                              participantId=participant_id)

  def wavelet_datadoc_set(self, wave_id, wavelet_id, name, data):
    """Sets a key/value pair on the data document of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      name: The key name for this data.
      data: The value of the data to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_DATADOC_SET, wave_id, wavelet_id,
                              datadocName=name, datadocValue=data)

  def robot_create_wavelet(self, domain, participants=None, message=''):
    """Creates a new wavelet.

    Args:
      domain: the domain to create the wave in
      participants: initial participants on this wavelet or None if none
      message: an optional payload that is returned with the corresponding
          event.

    Returns:
      data for the root_blip, wavelet
    """
    if participants is None:
      participants = []
    blip_data, wavelet_data = self._new_waveletdata(domain, participants)
    op = self.new_operation(ROBOT_CREATE_WAVELET,
                            wave_id=wavelet_data['waveId'],
                            wavelet_id=wavelet_data['waveletId'],
                            waveletData=wavelet_data)
    op.set_optional('message', message)
    return blip_data, wavelet_data

  def robot_fetch_wave(self, wave_id, wavelet_id):
    """Requests a snapshot of the specified wave.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(ROBOT_FETCH_WAVE, wave_id, wavelet_id)

  def wavelet_set_title(self, wave_id, wavelet_id, title):
    """Sets the title of a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      title: The title to set.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_SET_TITLE, wave_id, wavelet_id,
                              waveletTitle=title)

  def wavelet_modify_participant_role(
      self, wave_id, wavelet_id, participant_id, role):
    """Modify the role of a participant on a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      participant_id: Id of the participant to add.
      role: the new roles

    Returns:
      data for the root_blip, wavelet
    """
    return self.new_operation(WAVELET_MODIFY_PARTICIPANT_ROLE, wave_id,
                              wavelet_id, participantId=participant_id,
                              participantRole=role)

  def wavelet_modify_tag(self, wave_id, wavelet_id, tag, modify_how=None):
    """Modifies a tag in a wavelet.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      tag: The tag (a string).
      modify_how: (optional) how to apply the tag. The default is to add
          the tag. Specify 'remove' to remove. Specify None or 'add' to
          add.

    Returns:
      The operation created.
    """
    return self.new_operation(WAVELET_MODIFY_TAG, wave_id, wavelet_id,
                              name=tag).set_optional("modify_how", modify_how)

  def blip_create_child(self, wave_id, wavelet_id, blip_id):
    """Creates a child blip of another blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      JSON of blip for which further operations can be applied.
    """
    blip_data = self._new_blipdata(wave_id, wavelet_id,
                                   parent_blip_id=blip_id)
    self.new_operation(BLIP_CREATE_CHILD, wave_id, wavelet_id,
                       blipId=blip_id,
                       blipData=blip_data)
    return blip_data

  def blip_delete(self, wave_id, wavelet_id, blip_id):
    """Deletes the specified blip.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(BLIP_DELETE, wave_id, wavelet_id,
                              blipId=blip_id)

  def document_append_markup(self, wave_id, wavelet_id, blip_id, content):
    """Appends content with markup to a document.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.
      content: The markup content to append.

    Returns:
      The operation created.
    """
    return self.new_operation(DOCUMENT_APPEND_MARKUP, wave_id, wavelet_id,
                              blipId=blip_id, content=content)

  def document_modify(self, wave_id, wavelet_id, blip_id):
    """Creates and queues a document modify operation

    The returned operation still needs to be filled with details before
    it makes sense.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.

    Returns:
      The operation created.
    """
    return self.new_operation(DOCUMENT_MODIFY, wave_id, wavelet_id,
                              blipId=blip_id)

  def document_inline_blip_insert(self, wave_id, wavelet_id, blip_id,
                                  position):
    """Inserts an inline blip at a specific location.

    Args:
      wave_id: The wave id owning that this operation is applied to.
      wavelet_id: The wavelet id that this operation is applied to.
      blip_id: The blip id that this operation is applied to.
      position: The position in the document to insert the blip.

    Returns:
      JSON data for the blip that was created for further operations.
    """
    inline_blip_data = self._new_blipdata(wave_id, wavelet_id)
    inline_blip_data['parentBlipId'] = blip_id
    self.new_operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,
                       blipId=blip_id,
                       index=position,
                       blipData=inline_blip_data)
    return inline_blip_data
[ [ 8, 0, 0.0453, 0.0119, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0549, 0.0024, 0, 0.66, 0.0476, 841, 0, 1, 0, 0, 841, 0, 0 ], [ 1, 0, 0.0573, 0.0024, 0, 0.66...
[ "\"\"\"Support for operations that can be applied to the server.\n\nContains classes and utilities for creating operations that are to be\napplied on the server.\n\"\"\"", "import errors", "import random", "import util", "import sys", "PROTOCOL_VERSION = '0.21'", "WAVELET_APPEND_BLIP = 'wavelet.appendBl...
#!/usr/bin/python2.4 # # Copyright 2009 Google Inc. All Rights Reserved. """Tests for google3.walkabout.externalagents.api.commandline_robot_runner.""" __author__ = 'douwe@google.com (Douwe Osinga)' import StringIO from google3.pyglib import app from google3.pyglib import flags from google3.testing.pybase import googletest from google3.walkabout.externalagents.api import commandline_robot_runner from google3.walkabout.externalagents.api import events FLAGS = flags.FLAGS BLIP_JSON = ('{"wdykLROk*13":' '{"lastModifiedTime":1242079608457,' '"contributors":["someguy@test.com"],' '"waveletId":"test.com!conv+root",' '"waveId":"test.com!wdykLROk*11",' '"parentBlipId":null,' '"version":3,' '"creator":"someguy@test.com",' '"content":"\\nContent!",' '"blipId":"wdykLROk*13",' '"annotations":[{"range":{"start":0,"end":1},' '"name":"user/e/otherguy@test.com","value":"Other"}],' '"elements":{},' '"childBlipIds":[]}' '}') WAVELET_JSON = ('{"lastModifiedTime":1242079611003,' '"title":"A title",' '"waveletId":"test.com!conv+root",' '"rootBlipId":"wdykLROk*13",' '"dataDocuments":null,' '"creationTime":1242079608457,' '"waveId":"test.com!wdykLROk*11",' '"participants":["someguy@test.com","monty@appspot.com"],' '"creator":"someguy@test.com",' '"version":5}') EVENTS_JSON = ('[{"timestamp":1242079611003,' '"modifiedBy":"someguy@test.com",' '"properties":{"participantsRemoved":[],' '"participantsAdded":["monty@appspot.com"]},' '"type":"WAVELET_PARTICIPANTS_CHANGED"}]') TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % ( BLIP_JSON, WAVELET_JSON, EVENTS_JSON) class CommandlineRobotRunnerTest(googletest.TestCase): def testSimpleFlow(self): FLAGS.eventdef_wavelet_participants_changed = 'x' flag = 'eventdef_' + events.WaveletParticipantsChanged.type.lower() setattr(FLAGS, flag, 'w.title="New title!"') input_stream = StringIO.StringIO(TEST_JSON) output_stream = StringIO.StringIO() commandline_robot_runner.run_bot(input_stream, output_stream) res = output_stream.getvalue() 
self.assertTrue('wavelet.setTitle' in res) def main(unused_argv): googletest.main() if __name__ == '__main__': app.run()
[ [ 8, 0, 0.0658, 0.0132, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.0921, 0.0132, 0, 0.66, 0.0667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.1184, 0.0132, 0, 0.66,...
[ "\"\"\"Tests for google3.walkabout.externalagents.api.commandline_robot_runner.\"\"\"", "__author__ = 'douwe@google.com (Douwe Osinga)'", "import StringIO", "from google3.pyglib import app", "from google3.pyglib import flags", "from google3.testing.pybase import googletest", "from google3.walkabout.exte...
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the ops module.""" import unittest import ops class TestOperation(unittest.TestCase): """Test case for Operation class.""" def testFields(self): op = ops.Operation(ops.WAVELET_SET_TITLE, 'opid02', {'waveId': 'wavelet-id', 'title': 'a title'}) self.assertEqual(ops.WAVELET_SET_TITLE, op.method) self.assertEqual('opid02', op.id) self.assertEqual(2, len(op.params)) def testConstructModifyTag(self): q = ops.OperationQueue() op = q.wavelet_modify_tag('waveid', 'waveletid', 'tag') self.assertEqual(3, len(op.params)) op = q.wavelet_modify_tag( 'waveid', 'waveletid', 'tag', modify_how='remove') self.assertEqual(4, len(op.params)) def testConstructRobotFetchWave(self): q = ops.OperationQueue('proxyid') op = q.robot_fetch_wave('wave1', 'wavelet1') self.assertEqual(3, len(op.params)) self.assertEqual('proxyid', op.params['proxyingFor']) self.assertEqual('wave1', op.params['waveId']) self.assertEqual('wavelet1', op.params['waveletId']) class TestOperationQueue(unittest.TestCase): """Test case for OperationQueue class.""" def testSerialize(self): q = ops.OperationQueue() q.set_capability_hash('hash') op = q.wavelet_modify_tag('waveid', 'waveletid', 'tag') json = q.serialize() self.assertEqual(2, len(json)) self.assertEqual('robot.notifyCapabilitiesHash', json[0]['method']) self.assertEqual('hash', json[0]['params']['capabilitiesHash']) self.assertEqual(ops.PROTOCOL_VERSION, 
json[0]['params']['protocolVersion']) self.assertEqual('wavelet.modifyTag', json[1]['method']) if __name__ == '__main__': unittest.main()
[ [ 8, 0, 0.2537, 0.0149, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2985, 0.0149, 0, 0.66, 0.2, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.3284, 0.0149, 0, 0.66, ...
[ "\"\"\"Unit tests for the ops module.\"\"\"", "import unittest", "import ops", "class TestOperation(unittest.TestCase):\n \"\"\"Test case for Operation class.\"\"\"\n\n def testFields(self):\n op = ops.Operation(ops.WAVELET_SET_TITLE, 'opid02',\n {'waveId': 'wavelet-id',\n ...
#!/usr/bin/python2.4 # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility library containing various helpers used by the API.""" import re CUSTOM_SERIALIZE_METHOD_NAME = 'serialize' MARKUP_RE = re.compile(r'<([^>]*?)>') def force_unicode(object): """ Return the Unicode string version of object, with UTF-8 encoding. """ if isinstance(object, unicode): return object return unicode(str(object), 'utf-8') def parse_markup(markup): """Parses a bit of markup into robot compatible text. For now this is a rough approximation. """ def replace_tag(group): if not group.groups: return '' tag = group.groups()[0].split(' ', 1)[0] if (tag == 'p' or tag == 'br'): return '\n' return '' return MARKUP_RE.sub(replace_tag, markup) def is_iterable(inst): """Returns whether or not this is a list, tuple, set or dict . Note that this does not return true for strings. """ return hasattr(inst, '__iter__') def is_dict(inst): """Returns whether or not the specified instance is a dict.""" return hasattr(inst, 'iteritems') def is_user_defined_new_style_class(obj): """Returns whether or not the specified instance is a user-defined type.""" return type(obj).__module__ != '__builtin__' def lower_camel_case(s): """Converts a string to lower camel case. Examples: foo => foo foo_bar => fooBar foo__bar => fooBar foo_bar_baz => fooBarBaz Args: s: The string to convert to lower camel case. Returns: The lower camel cased string. 
""" return reduce(lambda a, b: a + (a and b.capitalize() or b), s.split('_')) def non_none_dict(d): """return a copy of the dictionary without none values.""" return dict([a for a in d.items() if not a[1] is None]) def _serialize_attributes(obj): """Serializes attributes of an instance. Iterates all attributes of an object and invokes serialize if they are public and not callable. Args: obj: The instance to serialize. Returns: The serialized object. """ data = {} for attr_name in dir(obj): if attr_name.startswith('_'): continue attr = getattr(obj, attr_name) if attr is None or callable(attr): continue # Looks okay, serialize it. data[lower_camel_case(attr_name)] = serialize(attr) return data def _serialize_dict(d): """Invokes serialize on all of its key/value pairs. Args: d: The dict instance to serialize. Returns: The serialized dict. """ data = {} for k, v in d.items(): data[lower_camel_case(k)] = serialize(v) return data def serialize(obj): """Serializes any instance. If this is a user-defined instance type, it will first check for a custom Serialize() function and use that if it exists. Otherwise, it will invoke serialize all of its public attributes. Lists and dicts are serialized trivially. Args: obj: The instance to serialize. Returns: The serialized object. """ if is_user_defined_new_style_class(obj): if obj and hasattr(obj, CUSTOM_SERIALIZE_METHOD_NAME): method = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME) if callable(method): return method() return _serialize_attributes(obj) elif is_dict(obj): return _serialize_dict(obj) elif is_iterable(obj): return [serialize(v) for v in obj] return obj class StringEnum(object): """Enum like class that is configured with a list of values. This class effectively implements an enum for Elements, except for that the actual values of the enums will be the string values. """ def __init__(self, *values): for name in values: setattr(self, name, name)
[ [ 8, 0, 0.1069, 0.0063, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1195, 0.0063, 0, 0.66, 0.0714, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 14, 0, 0.1321, 0.0063, 0, 0.6...
[ "\"\"\"Utility library containing various helpers used by the API.\"\"\"", "import re", "CUSTOM_SERIALIZE_METHOD_NAME = 'serialize'", "MARKUP_RE = re.compile(r'<([^>]*?)>')", "def force_unicode(object):\n \"\"\" Return the Unicode string version of object, with UTF-8 encoding. \"\"\"\n if isinstance(objec...
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module defines the ModuleTestRunnerClass.""" import unittest class ModuleTestRunner(object): """Responsible for executing all test cases in a list of modules.""" def __init__(self, module_list=None, module_test_settings=None): self.modules = module_list or [] self.settings = module_test_settings or {} def RunAllTests(self): """Executes all tests present in the list of modules.""" runner = unittest.TextTestRunner() for module in self.modules: for setting, value in self.settings.iteritems(): try: setattr(module, setting, value) except AttributeError: print '\nError running ' + str(setting) print '\nRunning all tests in module', module.__name__ runner.run(unittest.defaultTestLoader.loadTestsFromModule(module))
[ [ 8, 0, 0.425, 0.025, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.5, 0.025, 0, 0.66, 0.5, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 3, 0, 0.7875, 0.45, 0, 0.66, 1, ...
[ "\"\"\"Module defines the ModuleTestRunnerClass.\"\"\"", "import unittest", "class ModuleTestRunner(object):\n \"\"\"Responsible for executing all test cases in a list of modules.\"\"\"\n\n def __init__(self, module_list=None, module_test_settings=None):\n self.modules = module_list or []\n self.setting...
import simplejson import cgi class JSONFilter(object): def __init__(self, app, mime_type='text/x-json'): self.app = app self.mime_type = mime_type def __call__(self, environ, start_response): # Read JSON POST input to jsonfilter.json if matching mime type response = {'status': '200 OK', 'headers': []} def json_start_response(status, headers): response['status'] = status response['headers'].extend(headers) environ['jsonfilter.mime_type'] = self.mime_type if environ.get('REQUEST_METHOD', '') == 'POST': if environ.get('CONTENT_TYPE', '') == self.mime_type: args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _] data = environ['wsgi.input'].read(*map(int, args)) environ['jsonfilter.json'] = simplejson.loads(data) res = simplejson.dumps(self.app(environ, json_start_response)) jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp') if jsonp: content_type = 'text/javascript' res = ''.join(jsonp + ['(', res, ')']) elif 'Opera' in environ.get('HTTP_USER_AGENT', ''): # Opera has bunk XMLHttpRequest support for most mime types content_type = 'text/plain' else: content_type = self.mime_type headers = [ ('Content-type', content_type), ('Content-length', len(res)), ] headers.extend(response['headers']) start_response(response['status'], headers) return [res] def factory(app, global_conf, **kw): return JSONFilter(app, **kw)
[ [ 1, 0, 0.025, 0.025, 0, 0.66, 0, 386, 0, 1, 0, 0, 386, 0, 0 ], [ 1, 0, 0.05, 0.025, 0, 0.66, 0.3333, 934, 0, 1, 0, 0, 934, 0, 0 ], [ 3, 0, 0.5125, 0.85, 0, 0.66, ...
[ "import simplejson", "import cgi", "class JSONFilter(object):\n def __init__(self, app, mime_type='text/x-json'):\n self.app = app\n self.mime_type = mime_type\n\n def __call__(self, environ, start_response):\n # Read JSON POST input to jsonfilter.json if matching mime type\n r...
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger http://code.activestate.com/recipes/576693/ """ from UserDict import DictMixin # Modified from original to support Python 2.4, see # http://code.google.com/p/simplejson/issues/detail?id=53 try: all except NameError: def all(seq): for elem in seq: if not elem: return False return True class OrderedDict(dict, DictMixin): def __init__(self, *args, **kwds): if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__end except AttributeError: self.clear() self.update(*args, **kwds) def clear(self): self.__end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.__map = {} # key --> [key, prev, next] dict.clear(self) def __setitem__(self, key, value): if key not in self: end = self.__end curr = end[1] curr[2] = end[1] = self.__map[key] = [key, curr, end] dict.__setitem__(self, key, value) def __delitem__(self, key): dict.__delitem__(self, key) key, prev, next = self.__map.pop(key) prev[2] = next next[1] = prev def __iter__(self): end = self.__end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.__end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] def popitem(self, last=True): if not self: raise KeyError('dictionary is empty') # Modified from original to support Python 2.4, see # http://code.google.com/p/simplejson/issues/detail?id=53 if last: key = reversed(self).next() else: key = iter(self).next() value = self.pop(key) return key, value def __reduce__(self): items = [[k, self[k]] for k in self] tmp = self.__map, self.__end del self.__map, self.__end inst_dict = vars(self).copy() self.__map, self.__end = tmp if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def keys(self): return list(self) setdefault = DictMixin.setdefault update = DictMixin.update pop = DictMixin.pop values = DictMixin.values items = DictMixin.items 
iterkeys = DictMixin.iterkeys itervalues = DictMixin.itervalues iteritems = DictMixin.iteritems def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) def copy(self): return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): if isinstance(other, OrderedDict): return len(self)==len(other) and \ all(p==q for p, q in zip(self.items(), other.items())) return dict.__eq__(self, other) def __ne__(self, other): return not self == other
[ [ 8, 0, 0.0252, 0.042, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0504, 0.0084, 0, 0.66, 0.3333, 351, 0, 1, 0, 0, 351, 0, 0 ], [ 7, 0, 0.1134, 0.0672, 0, 0.66,...
[ "\"\"\"Drop-in replacement for collections.OrderedDict by Raymond Hettinger\n\nhttp://code.activestate.com/recipes/576693/\n\n\"\"\"", "from UserDict import DictMixin", "try:\n all\nexcept NameError:\n def all(seq):\n for elem in seq:\n if not elem:\n return False\n ...
"""JSON token scanner """ import re try: from simplejson._speedups import make_scanner as c_make_scanner except ImportError: c_make_scanner = None __all__ = ['make_scanner'] NUMBER_RE = re.compile( r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', (re.VERBOSE | re.MULTILINE | re.DOTALL)) def py_make_scanner(context): parse_object = context.parse_object parse_array = context.parse_array parse_string = context.parse_string match_number = NUMBER_RE.match encoding = context.encoding strict = context.strict parse_float = context.parse_float parse_int = context.parse_int parse_constant = context.parse_constant object_hook = context.object_hook object_pairs_hook = context.object_pairs_hook def _scan_once(string, idx): try: nextchar = string[idx] except IndexError: raise StopIteration if nextchar == '"': return parse_string(string, idx + 1, encoding, strict) elif nextchar == '{': return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook, object_pairs_hook) elif nextchar == '[': return parse_array((string, idx + 1), _scan_once) elif nextchar == 'n' and string[idx:idx + 4] == 'null': return None, idx + 4 elif nextchar == 't' and string[idx:idx + 4] == 'true': return True, idx + 4 elif nextchar == 'f' and string[idx:idx + 5] == 'false': return False, idx + 5 m = match_number(string, idx) if m is not None: integer, frac, exp = m.groups() if frac or exp: res = parse_float(integer + (frac or '') + (exp or '')) else: res = parse_int(integer) return res, m.end() elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': return parse_constant('NaN'), idx + 3 elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': return parse_constant('Infinity'), idx + 8 elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': return parse_constant('-Infinity'), idx + 9 else: raise StopIteration return _scan_once make_scanner = c_make_scanner or py_make_scanner
[ [ 8, 0, 0.0224, 0.0299, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0448, 0.0149, 0, 0.66, 0.1667, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 7, 0, 0.0821, 0.0597, 0, 0.66...
[ "\"\"\"JSON token scanner\n\"\"\"", "import re", "try:\n from simplejson._speedups import make_scanner as c_make_scanner\nexcept ImportError:\n c_make_scanner = None", " from simplejson._speedups import make_scanner as c_make_scanner", " c_make_scanner = None", "__all__ = ['make_scanner']", ...
"""Implementation of JSONEncoder """ import re try: from _speedups import encode_basestring_ascii as \ c_encode_basestring_ascii except ImportError: c_encode_basestring_ascii = None try: from _speedups import make_encoder as c_make_encoder except ImportError: c_make_encoder = None from decoder import PosInf ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') ESCAPE_DCT = { '\\': '\\\\', '"': '\\"', '\b': '\\b', '\f': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', } for i in range(0x20): #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) FLOAT_REPR = repr def encode_basestring(s): """Return a JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): return ESCAPE_DCT[match.group(0)] return u'"' + ESCAPE.sub(replace, s) + u'"' def py_encode_basestring_ascii(s): """Return an ASCII-only JSON representation of a Python string """ if isinstance(s, str) and HAS_UTF8.search(s) is not None: s = s.decode('utf-8') def replace(match): s = match.group(0) try: return ESCAPE_DCT[s] except KeyError: n = ord(s) if n < 0x10000: #return '\\u{0:04x}'.format(n) return '\\u%04x' % (n,) else: # surrogate pair n -= 0x10000 s1 = 0xd800 | ((n >> 10) & 0x3ff) s2 = 0xdc00 | (n & 0x3ff) #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) return '\\u%04x\\u%04x' % (s1, s2) return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' encode_basestring_ascii = ( c_encode_basestring_ascii or py_encode_basestring_ascii) class JSONEncoder(object): """Extensible JSON <http://json.org> encoder for Python data structures. 
Supports the following objects and types by default: +-------------------+---------------+ | Python | JSON | +===================+===============+ | dict | object | +-------------------+---------------+ | list, tuple | array | +-------------------+---------------+ | str, unicode | string | +-------------------+---------------+ | int, long, float | number | +-------------------+---------------+ | True | true | +-------------------+---------------+ | False | false | +-------------------+---------------+ | None | null | +-------------------+---------------+ To extend this to recognize other objects, subclass and implement a ``.default()`` method with another method that returns a serializable object for ``o`` if possible, otherwise it should call the superclass implementation (to raise ``TypeError``). """ item_separator = ', ' key_separator = ': ' def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, sort_keys=False, indent=None, separators=None, encoding='utf-8', default=None): """Constructor for JSONEncoder, with sensible defaults. If skipkeys is false, then it is a TypeError to attempt encoding of keys that are not str, int, long, float or None. If skipkeys is True, such items are simply skipped. If ensure_ascii is true, the output is guaranteed to be str objects with all incoming unicode characters escaped. If ensure_ascii is false, the output will be unicode object. If check_circular is true, then lists, dicts, and custom encoded objects will be checked for circular references during encoding to prevent an infinite recursion (which would cause an OverflowError). Otherwise, no such check takes place. If allow_nan is true, then NaN, Infinity, and -Infinity will be encoded as such. This behavior is not JSON specification compliant, but is consistent with most JavaScript based encoders and decoders. Otherwise, it will be a ValueError to encode such floats. 
If sort_keys is true, then the output of dictionaries will be sorted by key; this is useful for regression tests to ensure that JSON serializations can be compared on a day-to-day basis. If indent is a string, then JSON array elements and object members will be pretty-printed with a newline followed by that string repeated for each level of nesting. ``None`` (the default) selects the most compact representation without any newlines. For backwards compatibility with versions of simplejson earlier than 2.1.0, an integer is also accepted and is converted to a string with that many spaces. If specified, separators should be a (item_separator, key_separator) tuple. The default is (', ', ': '). To get the most compact JSON representation you should specify (',', ':') to eliminate whitespace. If specified, default is a function that gets called for objects that can't otherwise be serialized. It should return a JSON encodable version of the object or raise a ``TypeError``. If encoding is not None, then all input strings will be transformed into unicode using that encoding prior to JSON-encoding. The default is UTF-8. """ self.skipkeys = skipkeys self.ensure_ascii = ensure_ascii self.check_circular = check_circular self.allow_nan = allow_nan self.sort_keys = sort_keys if isinstance(indent, (int, long)): indent = ' ' * indent self.indent = indent if separators is not None: self.item_separator, self.key_separator = separators if default is not None: self.default = default self.encoding = encoding def default(self, o): """Implement this method in a subclass such that it returns a serializable object for ``o``, or calls the base implementation (to raise a ``TypeError``). 
For example, to support arbitrary iterators, you could implement default like this:: def default(self, o): try: iterable = iter(o) except TypeError: pass else: return list(iterable) return JSONEncoder.default(self, o) """ raise TypeError(repr(o) + " is not JSON serializable") def encode(self, o): """Return a JSON string representation of a Python data structure. >>> from simplejson import JSONEncoder >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) '{"foo": ["bar", "baz"]}' """ # This is for extremely simple cases and benchmarks. if isinstance(o, basestring): if isinstance(o, str): _encoding = self.encoding if (_encoding is not None and not (_encoding == 'utf-8')): o = o.decode(_encoding) if self.ensure_ascii: return encode_basestring_ascii(o) else: return encode_basestring(o) # This doesn't pass the iterator directly to ''.join() because the # exceptions aren't as detailed. The list call should be roughly # equivalent to the PySequence_Fast that ''.join() would do. chunks = self.iterencode(o, _one_shot=True) if not isinstance(chunks, (list, tuple)): chunks = list(chunks) if self.ensure_ascii: return ''.join(chunks) else: return u''.join(chunks) def iterencode(self, o, _one_shot=False): """Encode the given object and yield each string representation as available. For example:: for chunk in JSONEncoder().iterencode(bigobject): mysocket.write(chunk) """ if self.check_circular: markers = {} else: markers = None if self.ensure_ascii: _encoder = encode_basestring_ascii else: _encoder = encode_basestring if self.encoding != 'utf-8': def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): if isinstance(o, str): o = o.decode(_encoding) return _orig_encoder(o) def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): # Check for specials. Note that this type of test is processor # and/or platform-specific, so do tests which don't depend on # the internals. 
if o != o: text = 'NaN' elif o == _inf: text = 'Infinity' elif o == _neginf: text = '-Infinity' else: return _repr(o) if not allow_nan: raise ValueError( "Out of range float values are not JSON compliant: " + repr(o)) return text if (_one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys): _iterencode = c_make_encoder( markers, self.default, _encoder, self.indent, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, self.allow_nan) else: _iterencode = _make_iterencode( markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot) return _iterencode(o, 0) def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, ## HACK: hand-optimized bytecode; turn globals into locals False=False, True=True, ValueError=ValueError, basestring=basestring, dict=dict, float=float, id=id, int=int, isinstance=isinstance, list=list, long=long, str=str, tuple=tuple, ): def _iterencode_list(lst, _current_indent_level): if not lst: yield '[]' return if markers is not None: markerid = id(lst) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = lst buf = '[' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (_indent * _current_indent_level) separator = _item_separator + newline_indent buf += newline_indent else: newline_indent = None separator = _item_separator first = True for value in lst: if first: first = False else: buf = separator if isinstance(value, basestring): yield buf + _encoder(value) elif value is None: yield buf + 'null' elif value is True: yield buf + 'true' elif value is False: yield buf + 'false' elif isinstance(value, (int, long)): yield buf + str(value) elif isinstance(value, float): yield buf + _floatstr(value) else: yield buf if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, 
_current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (_indent * _current_indent_level) yield ']' if markers is not None: del markers[markerid] def _iterencode_dict(dct, _current_indent_level): if not dct: yield '{}' return if markers is not None: markerid = id(dct) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = dct yield '{' if _indent is not None: _current_indent_level += 1 newline_indent = '\n' + (_indent * _current_indent_level) item_separator = _item_separator + newline_indent yield newline_indent else: newline_indent = None item_separator = _item_separator first = True if _sort_keys: items = dct.items() items.sort(key=lambda kv: kv[0]) else: items = dct.iteritems() for key, value in items: if isinstance(key, basestring): pass # JavaScript is weakly typed for these, so it makes sense to # also allow them. Many encoders seem to do something like this. 
elif isinstance(key, float): key = _floatstr(key) elif key is True: key = 'true' elif key is False: key = 'false' elif key is None: key = 'null' elif isinstance(key, (int, long)): key = str(key) elif _skipkeys: continue else: raise TypeError("key " + repr(key) + " is not a string") if first: first = False else: yield item_separator yield _encoder(key) yield _key_separator if isinstance(value, basestring): yield _encoder(value) elif value is None: yield 'null' elif value is True: yield 'true' elif value is False: yield 'false' elif isinstance(value, (int, long)): yield str(value) elif isinstance(value, float): yield _floatstr(value) else: if isinstance(value, (list, tuple)): chunks = _iterencode_list(value, _current_indent_level) elif isinstance(value, dict): chunks = _iterencode_dict(value, _current_indent_level) else: chunks = _iterencode(value, _current_indent_level) for chunk in chunks: yield chunk if newline_indent is not None: _current_indent_level -= 1 yield '\n' + (_indent * _current_indent_level) yield '}' if markers is not None: del markers[markerid] def _iterencode(o, _current_indent_level): if isinstance(o, basestring): yield _encoder(o) elif o is None: yield 'null' elif o is True: yield 'true' elif o is False: yield 'false' elif isinstance(o, (int, long)): yield str(o) elif isinstance(o, float): yield _floatstr(o) elif isinstance(o, (list, tuple)): for chunk in _iterencode_list(o, _current_indent_level): yield chunk elif isinstance(o, dict): for chunk in _iterencode_dict(o, _current_indent_level): yield chunk else: if markers is not None: markerid = id(o) if markerid in markers: raise ValueError("Circular reference detected") markers[markerid] = o o = _default(o) for chunk in _iterencode(o, _current_indent_level): yield chunk if markers is not None: del markers[markerid] return _iterencode
[ [ 8, 0, 0.0033, 0.0044, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0066, 0.0022, 0, 0.66, 0.0667, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 7, 0, 0.0154, 0.011, 0, 0.66,...
[ "\"\"\"Implementation of JSONEncoder\n\"\"\"", "import re", "try:\n from _speedups import encode_basestring_ascii as \\\n c_encode_basestring_ascii\nexcept ImportError:\n c_encode_basestring_ascii = None", " from _speedups import encode_basestring_ascii as \\\n c_encode_basestring_ascii...
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data interchange format. :mod:`simplejson` exposes an API familiar to users of the standard library :mod:`marshal` and :mod:`pickle` modules. It is the externally maintained version of the :mod:`json` library contained in Python 2.6, but maintains compatibility with Python 2.4 and Python 2.5 and (currently) has significant performance advantages, even without using the optional C extension for speedups. Encoding basic Python object hierarchies:: >>> import simplejson as json >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) '["foo", {"bar": ["baz", null, 1.0, 2]}]' >>> print json.dumps("\"foo\bar") "\"foo\bar" >>> print json.dumps(u'\u1234') "\u1234" >>> print json.dumps('\\') "\\" >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) {"a": 0, "b": 0, "c": 0} >>> from StringIO import StringIO >>> io = StringIO() >>> json.dump(['streaming API'], io) >>> io.getvalue() '["streaming API"]' Compact encoding:: >>> import simplejson as json >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) '[1,2,3,{"4":5,"6":7}]' Pretty printing:: >>> import simplejson as json >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ') >>> print '\n'.join([l.rstrip() for l in s.splitlines()]) { "4": 5, "6": 7 } Decoding JSON:: >>> import simplejson as json >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj True >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar' True >>> from StringIO import StringIO >>> io = StringIO('["streaming API"]') >>> json.load(io)[0] == 'streaming API' True Specializing JSON object decoding:: >>> import simplejson as json >>> def as_complex(dct): ... if '__complex__' in dct: ... return complex(dct['real'], dct['imag']) ... return dct ... >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', ... 
object_hook=as_complex) (1+2j) >>> from decimal import Decimal >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1') True Specializing JSON object encoding:: >>> import simplejson as json >>> def encode_complex(obj): ... if isinstance(obj, complex): ... return [obj.real, obj.imag] ... raise TypeError(repr(o) + " is not JSON serializable") ... >>> json.dumps(2 + 1j, default=encode_complex) '[2.0, 1.0]' >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) '[2.0, 1.0]' >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) '[2.0, 1.0]' Using simplejson.tool from the shell to validate and pretty-print:: $ echo '{"json":"obj"}' | python -m simplejson.tool { "json": "obj" } $ echo '{ 1.2:3.4}' | python -m simplejson.tool Expecting property name: line 1 column 2 (char 2) """ __version__ = '2.1.0' __all__ = [ 'dump', 'dumps', 'load', 'loads', 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder', 'OrderedDict', ] __author__ = 'Bob Ippolito <bob@redivi.com>' from decoder import JSONDecoder, JSONDecodeError from encoder import JSONEncoder try: from collections import OrderedDict except ImportError: from ordered_dict import OrderedDict _default_encoder = JSONEncoder( skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, indent=None, separators=None, encoding='utf-8', default=None, ) def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a ``.write()``-supporting file-like object). If ``skipkeys`` is true then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is false, then the some chunks written to ``fp`` may be ``unicode`` instances, subject to normal Python ``str`` to ``unicode`` coercion rules. 
Unless ``fp.write()`` explicitly understands ``unicode`` (as in ``codecs.getwriter()``) this is likely to cause an error. If ``check_circular`` is false, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is false, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If *indent* is a string, then JSON array elements and object members will be pretty-printed with a newline followed by that string repeated for each level of nesting. ``None`` (the default) selects the most compact representation without any newlines. For backwards compatibility with versions of simplejson earlier than 2.1.0, an integer is also accepted and is converted to a string with that many spaces. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. 
""" # cached encoder if (not skipkeys and ensure_ascii and check_circular and allow_nan and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): iterable = _default_encoder.iterencode(obj) else: if cls is None: cls = JSONEncoder iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).iterencode(obj) # could accelerate with writelines in some versions of Python, at # a debuggability cost for chunk in iterable: fp.write(chunk) def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, **kw): """Serialize ``obj`` to a JSON formatted ``str``. If ``skipkeys`` is false then ``dict`` keys that are not basic types (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) will be skipped instead of raising a ``TypeError``. If ``ensure_ascii`` is false, then the return value will be a ``unicode`` instance subject to normal Python ``str`` to ``unicode`` coercion rules instead of being escaped to an ASCII ``str``. If ``check_circular`` is false, then the circular reference check for container types will be skipped and a circular reference will result in an ``OverflowError`` (or worse). If ``allow_nan`` is false, then it will be a ``ValueError`` to serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in strict compliance of the JSON specification, instead of using the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). If ``indent`` is a string, then JSON array elements and object members will be pretty-printed with a newline followed by that string repeated for each level of nesting. ``None`` (the default) selects the most compact representation without any newlines. 
For backwards compatibility with versions of simplejson earlier than 2.1.0, an integer is also accepted and is converted to a string with that many spaces. If ``separators`` is an ``(item_separator, dict_separator)`` tuple then it will be used instead of the default ``(', ', ': ')`` separators. ``(',', ':')`` is the most compact JSON representation. ``encoding`` is the character encoding for str instances, default is UTF-8. ``default(obj)`` is a function that should return a serializable version of obj or raise TypeError. The default simply raises TypeError. To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the ``.default()`` method to serialize additional types), specify it with the ``cls`` kwarg. """ # cached encoder if (not skipkeys and ensure_ascii and check_circular and allow_nan and cls is None and indent is None and separators is None and encoding == 'utf-8' and default is None and not kw): return _default_encoder.encode(obj) if cls is None: cls = JSONEncoder return cls( skipkeys=skipkeys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, indent=indent, separators=separators, encoding=encoding, default=default, **kw).encode(obj) _default_decoder = JSONDecoder(encoding=None, object_hook=None, object_pairs_hook=None) def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing a JSON document) to a Python object. *encoding* determines the encoding used to interpret any :class:`str` objects decoded by this instance (``'utf-8'`` by default). It has no effect when decoding :class:`unicode` objects. Note that currently only encodings that are a superset of ASCII work, strings of other encodings should be passed in as :class:`unicode`. 
*object_hook*, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given :class:`dict`. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting). *object_pairs_hook* is an optional function that will be called with the result of any object literal decode with an ordered list of pairs. The return value of *object_pairs_hook* will be used instead of the :class:`dict`. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, :func:`collections.OrderedDict` will remember the order of insertion). If *object_hook* is also defined, the *object_pairs_hook* takes priority. *parse_float*, if specified, will be called with the string of every JSON float to be decoded. By default, this is equivalent to ``float(num_str)``. This can be used to use another datatype or parser for JSON floats (e.g. :class:`decimal.Decimal`). *parse_int*, if specified, will be called with the string of every JSON int to be decoded. By default, this is equivalent to ``int(num_str)``. This can be used to use another datatype or parser for JSON integers (e.g. :class:`float`). *parse_constant*, if specified, will be called with one of the following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This can be used to raise an exception if invalid JSON numbers are encountered. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. """ return loads(fp.read(), encoding=encoding, cls=cls, object_hook=object_hook, parse_float=parse_float, parse_int=parse_int, parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw) def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON document) to a Python object. 
*encoding* determines the encoding used to interpret any :class:`str` objects decoded by this instance (``'utf-8'`` by default). It has no effect when decoding :class:`unicode` objects. Note that currently only encodings that are a superset of ASCII work, strings of other encodings should be passed in as :class:`unicode`. *object_hook*, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given :class:`dict`. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting). *object_pairs_hook* is an optional function that will be called with the result of any object literal decode with an ordered list of pairs. The return value of *object_pairs_hook* will be used instead of the :class:`dict`. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, :func:`collections.OrderedDict` will remember the order of insertion). If *object_hook* is also defined, the *object_pairs_hook* takes priority. *parse_float*, if specified, will be called with the string of every JSON float to be decoded. By default, this is equivalent to ``float(num_str)``. This can be used to use another datatype or parser for JSON floats (e.g. :class:`decimal.Decimal`). *parse_int*, if specified, will be called with the string of every JSON int to be decoded. By default, this is equivalent to ``int(num_str)``. This can be used to use another datatype or parser for JSON integers (e.g. :class:`float`). *parse_constant*, if specified, will be called with one of the following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This can be used to raise an exception if invalid JSON numbers are encountered. To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` kwarg. 
""" if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): return _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: kw['object_hook'] = object_hook if object_pairs_hook is not None: kw['object_pairs_hook'] = object_pairs_hook if parse_float is not None: kw['parse_float'] = parse_float if parse_int is not None: kw['parse_int'] = parse_int if parse_constant is not None: kw['parse_constant'] = parse_constant return cls(encoding=encoding, **kw).decode(s) def _toggle_speedups(enabled): import simplejson.decoder as dec import simplejson.encoder as enc import simplejson.scanner as scan try: from simplejson._speedups import make_encoder as c_make_encoder except ImportError: c_make_encoder = None if enabled: dec.scanstring = dec.c_scanstring or dec.py_scanstring enc.c_make_encoder = c_make_encoder enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or enc.py_encode_basestring_ascii) scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner else: dec.scanstring = dec.py_scanstring enc.c_make_encoder = None enc.encode_basestring_ascii = enc.py_encode_basestring_ascii scan.make_scanner = scan.py_make_scanner dec.make_scanner = scan.make_scanner global _default_decoder _default_decoder = JSONDecoder( encoding=None, object_hook=None, object_pairs_hook=None, ) global _default_encoder _default_encoder = JSONEncoder( skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, indent=None, separators=None, encoding='utf-8', default=None, )
[ [ 8, 0, 0.1232, 0.2438, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.2463, 0.0025, 0, 0.66, 0.0769, 162, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.2537, 0.0123, 0, 0.66...
[ "r\"\"\"JSON (JavaScript Object Notation) <http://json.org> is a subset of\nJavaScript syntax (ECMA-262 3rd edition) used as a lightweight data\ninterchange format.\n\n:mod:`simplejson` exposes an API familiar to users of the standard library\n:mod:`marshal` and :mod:`pickle` modules. It is the externally maintaine...
import cgi import urllib import time import random import urlparse import hmac import base64 VERSION = '1.0' # Hi Blaine! HTTP_METHOD = 'GET' SIGNATURE_METHOD = 'PLAINTEXT' # Generic exception class class OAuthError(RuntimeError): def __init__(self, message='OAuth error occured.'): self.message = message # optional WWW-Authenticate header (401 error) def build_authenticate_header(realm=''): return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} # url escape def escape(s): # escape '/' too return urllib.quote(s, safe='~') # util function: current timestamp # seconds since epoch (UTC) def generate_timestamp(): return int(time.time()) # util function: nonce # pseudorandom number def generate_nonce(length=8): return ''.join(str(random.randint(0, 9)) for i in range(length)) # OAuthConsumer is a data type that represents the identity of the Consumer # via its shared secret with the Service Provider. class OAuthConsumer(object): key = None secret = None def __init__(self, key, secret): self.key = key self.secret = secret # OAuthToken is a data type that represents an End User via either an access # or request token. class OAuthToken(object): # access tokens and request tokens key = None secret = None ''' key = the token secret = the token secret ''' def __init__(self, key, secret): self.key = key self.secret = secret def to_string(self): return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret}) # return a token from something like: # oauth_token_secret=digg&oauth_token=digg @staticmethod def from_string(s): params = cgi.parse_qs(s, keep_blank_values=False) key = params['oauth_token'][0] secret = params['oauth_token_secret'][0] return OAuthToken(key, secret) def __str__(self): return self.to_string() # OAuthRequest represents the request and can be serialized class OAuthRequest(object): ''' OAuth parameters: - oauth_consumer_key - oauth_token - oauth_signature_method - oauth_signature - oauth_timestamp - oauth_nonce - oauth_version ... 
any additional parameters, as defined by the Service Provider. ''' parameters = None # oauth parameters http_method = HTTP_METHOD http_url = None version = VERSION def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None): self.http_method = http_method self.http_url = http_url self.parameters = parameters or {} def set_parameter(self, parameter, value): self.parameters[parameter] = value def get_parameter(self, parameter): try: return self.parameters[parameter] except: raise OAuthError('Parameter not found: %s' % parameter) def _get_timestamp_nonce(self): return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce') # get any non-oauth parameters def get_nonoauth_parameters(self): parameters = {} for k, v in self.parameters.iteritems(): # ignore oauth parameters if k.find('oauth_') < 0: parameters[k] = v return parameters # serialize as a header for an HTTPAuth request def to_header(self, realm=''): auth_header = 'OAuth realm="%s"' % realm # add the oauth parameters if self.parameters: for k, v in self.parameters.iteritems(): auth_header += ', %s="%s"' % (k, escape(str(v))) return {'Authorization': auth_header} # serialize as post data for a POST request def to_postdata(self): return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()) # serialize as a url for a GET request def to_url(self): return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata()) # return a string that consists of all the parameters that need to be signed def get_normalized_parameters(self): params = self.parameters try: # exclude the signature if it exists del params['oauth_signature'] except: pass key_values = params.items() # sort lexicographically, first after key, then after value key_values.sort() # combine key value pairs in string and escape return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values) # just uppercases the http method def get_normalized_http_method(self): return 
self.http_method.upper() # parses the url and rebuilds it to be scheme://host/path def get_normalized_http_url(self): parts = urlparse.urlparse(self.http_url) url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path return url_string # set the signature parameter to the result of build_signature def sign_request(self, signature_method, consumer, token): # set the signature method self.set_parameter('oauth_signature_method', signature_method.get_name()) # set the signature self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token)) def build_signature(self, signature_method, consumer, token): # call the build signature method within the signature method return signature_method.build_signature(self, consumer, token) @staticmethod def from_request(http_method, http_url, headers=None, parameters=None, query_string=None): # combine multiple parameter sources if parameters is None: parameters = {} # headers if headers and 'Authorization' in headers: auth_header = headers['Authorization'] # check that the authorization header is OAuth if auth_header.index('OAuth') > -1: try: # get the parameters from the header header_params = OAuthRequest._split_header(auth_header) parameters.update(header_params) except: raise OAuthError('Unable to parse OAuth parameters from Authorization header.') # GET or POST query string if query_string: query_params = OAuthRequest._split_url_string(query_string) parameters.update(query_params) # URL parameters param_str = urlparse.urlparse(http_url)[4] # query url_params = OAuthRequest._split_url_string(param_str) parameters.update(url_params) if parameters: return OAuthRequest(http_method, http_url, parameters) return None @staticmethod def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None): if not parameters: parameters = {} defaults = { 'oauth_consumer_key': oauth_consumer.key, 'oauth_timestamp': generate_timestamp(), 
'oauth_nonce': generate_nonce(), 'oauth_version': OAuthRequest.version, } defaults.update(parameters) parameters = defaults if token: parameters['oauth_token'] = token.key return OAuthRequest(http_method, http_url, parameters) @staticmethod def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None): if not parameters: parameters = {} parameters['oauth_token'] = token.key if callback: parameters['oauth_callback'] = escape(callback) return OAuthRequest(http_method, http_url, parameters) # util function: turn Authorization: header into parameters, has to do some unescaping @staticmethod def _split_header(header): params = {} parts = header.split(',') for param in parts: # ignore realm parameter if param.find('OAuth realm') > -1: continue # remove whitespace param = param.strip() # split key-value param_parts = param.split('=', 1) # remove quotes and unescape the value params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"')) return params # util function: turn url string into parameters, has to do some unescaping @staticmethod def _split_url_string(param_str): parameters = cgi.parse_qs(param_str, keep_blank_values=False) for k, v in parameters.iteritems(): parameters[k] = urllib.unquote(v[0]) return parameters # OAuthServer is a worker to check a requests validity against a data store class OAuthServer(object): timestamp_threshold = 300 # in seconds, five minutes version = VERSION signature_methods = None data_store = None def __init__(self, data_store=None, signature_methods=None): self.data_store = data_store self.signature_methods = signature_methods or {} def set_data_store(self, oauth_data_store): self.data_store = data_store def get_data_store(self): return self.data_store def add_signature_method(self, signature_method): self.signature_methods[signature_method.get_name()] = signature_method return self.signature_methods # process a request_token request # returns the request token on success def 
fetch_request_token(self, oauth_request): try: # get the request token for authorization token = self._get_token(oauth_request, 'request') except OAuthError: # no token required for the initial token request version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) self._check_signature(oauth_request, consumer, None) # fetch a new token token = self.data_store.fetch_request_token(consumer) return token # process an access_token request # returns the access token on success def fetch_access_token(self, oauth_request): version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) # get the request token token = self._get_token(oauth_request, 'request') self._check_signature(oauth_request, consumer, token) new_token = self.data_store.fetch_access_token(consumer, token) return new_token # verify an api call, checks all the parameters def verify_request(self, oauth_request): # -> consumer and token version = self._get_version(oauth_request) consumer = self._get_consumer(oauth_request) # get the access token token = self._get_token(oauth_request, 'access') self._check_signature(oauth_request, consumer, token) parameters = oauth_request.get_nonoauth_parameters() return consumer, token, parameters # authorize a request token def authorize_token(self, token, user): return self.data_store.authorize_request_token(token, user) # get the callback url def get_callback(self, oauth_request): return oauth_request.get_parameter('oauth_callback') # optional support for the authenticate header def build_authenticate_header(self, realm=''): return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} # verify the correct version request for this server def _get_version(self, oauth_request): try: version = oauth_request.get_parameter('oauth_version') except: version = VERSION if version and version != self.version: raise OAuthError('OAuth version %s not supported.' 
% str(version)) return version # figure out the signature with some defaults def _get_signature_method(self, oauth_request): try: signature_method = oauth_request.get_parameter('oauth_signature_method') except: signature_method = SIGNATURE_METHOD try: # get the signature method object signature_method = self.signature_methods[signature_method] except: signature_method_names = ', '.join(self.signature_methods.keys()) raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names)) return signature_method def _get_consumer(self, oauth_request): consumer_key = oauth_request.get_parameter('oauth_consumer_key') if not consumer_key: raise OAuthError('Invalid consumer key.') consumer = self.data_store.lookup_consumer(consumer_key) if not consumer: raise OAuthError('Invalid consumer.') return consumer # try to find the token for the provided request token key def _get_token(self, oauth_request, token_type='access'): token_field = oauth_request.get_parameter('oauth_token') token = self.data_store.lookup_token(token_type, token_field) if not token: raise OAuthError('Invalid %s token: %s' % (token_type, token_field)) return token def _check_signature(self, oauth_request, consumer, token): timestamp, nonce = oauth_request._get_timestamp_nonce() self._check_timestamp(timestamp) self._check_nonce(consumer, token, nonce) signature_method = self._get_signature_method(oauth_request) try: signature = oauth_request.get_parameter('oauth_signature') except: raise OAuthError('Missing signature.') # validate the signature valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature) if not valid_sig: key, base = signature_method.build_signature_base_string(oauth_request, consumer, token) raise OAuthError('Invalid signature. 
Expected signature base string: %s' % base) built = signature_method.build_signature(oauth_request, consumer, token) def _check_timestamp(self, timestamp): # verify that timestamp is recentish timestamp = int(timestamp) now = int(time.time()) lapsed = now - timestamp if lapsed > self.timestamp_threshold: raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) def _check_nonce(self, consumer, token, nonce): # verify that the nonce is uniqueish nonce = self.data_store.lookup_nonce(consumer, token, nonce) if nonce: raise OAuthError('Nonce already used: %s' % str(nonce)) # OAuthClient is a worker to attempt to execute a request class OAuthClient(object): consumer = None token = None def __init__(self, oauth_consumer, oauth_token): self.consumer = oauth_consumer self.token = oauth_token def get_consumer(self): return self.consumer def get_token(self): return self.token def fetch_request_token(self, oauth_request): # -> OAuthToken raise NotImplementedError def fetch_access_token(self, oauth_request): # -> OAuthToken raise NotImplementedError def access_resource(self, oauth_request): # -> some protected resource raise NotImplementedError # OAuthDataStore is a database abstraction used to lookup consumers and tokens class OAuthDataStore(object): def lookup_consumer(self, key): # -> OAuthConsumer raise NotImplementedError def lookup_token(self, oauth_consumer, token_type, token_token): # -> OAuthToken raise NotImplementedError def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp): # -> OAuthToken raise NotImplementedError def fetch_request_token(self, oauth_consumer): # -> OAuthToken raise NotImplementedError def fetch_access_token(self, oauth_consumer, oauth_token): # -> OAuthToken raise NotImplementedError def authorize_request_token(self, oauth_token, user): # -> OAuthToken raise NotImplementedError # OAuthSignatureMethod is a strategy class that implements a 
signature method class OAuthSignatureMethod(object): def get_name(self): # -> str raise NotImplementedError def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token): # -> str key, str raw raise NotImplementedError def build_signature(self, oauth_request, oauth_consumer, oauth_token): # -> str raise NotImplementedError def check_signature(self, oauth_request, consumer, token, signature): built = self.build_signature(oauth_request, consumer, token) return built == signature class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod): def get_name(self): return 'HMAC-SHA1' def build_signature_base_string(self, oauth_request, consumer, token): sig = ( escape(oauth_request.get_normalized_http_method()), escape(oauth_request.get_normalized_http_url()), escape(oauth_request.get_normalized_parameters()), ) key = '%s&' % escape(consumer.secret) if token: key += escape(token.secret) raw = '&'.join(sig) return key, raw def build_signature(self, oauth_request, consumer, token): # build the base signature string key, raw = self.build_signature_base_string(oauth_request, consumer, token) # hmac object try: import hashlib # 2.5 hashed = hmac.new(key, raw, hashlib.sha1) except: import sha # deprecated hashed = hmac.new(key, raw, sha) # calculate the digest base 64 return base64.b64encode(hashed.digest()) class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod): def get_name(self): return 'PLAINTEXT' def build_signature_base_string(self, oauth_request, consumer, token): # concatenate the consumer key and secret sig = escape(consumer.secret) + '&' if token: sig = sig + escape(token.secret) return sig def build_signature(self, oauth_request, consumer, token): return self.build_signature_base_string(oauth_request, consumer, token)
[ [ 1, 0, 0.0019, 0.0019, 0, 0.66, 0, 934, 0, 1, 0, 0, 934, 0, 0 ], [ 1, 0, 0.0038, 0.0019, 0, 0.66, 0.0435, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0057, 0.0019, 0, ...
[ "import cgi", "import urllib", "import time", "import random", "import urlparse", "import hmac", "import base64", "VERSION = '1.0' # Hi Blaine!", "HTTP_METHOD = 'GET'", "SIGNATURE_METHOD = 'PLAINTEXT'", "class OAuthError(RuntimeError):\n def __init__(self, message='OAuth error occured.'):\n ...
#!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the wavelet module."""


import unittest

import blip
import element
import ops
import wavelet

import simplejson


ROBOT_NAME = 'robot@appspot.com'

# Wavelet payload shared by every test; mirrors what the server would send.
TEST_WAVELET_DATA = {
    'creator': ROBOT_NAME,
    'creationTime': 100,
    'lastModifiedTime': 101,
    'participants': [ROBOT_NAME],
    'participantsRoles': {ROBOT_NAME: wavelet.Participants.ROLE_FULL},
    'rootBlipId': 'blip-1',
    'title': 'Title',
    'waveId': 'test.com!w+g3h3im',
    'waveletId': 'test.com!root+conv',
    'tags': ['tag1', 'tag2'],
}

# Root blip payload, kept consistent with TEST_WAVELET_DATA above.
TEST_BLIP_DATA = {
    'blipId': TEST_WAVELET_DATA['rootBlipId'],
    'childBlipIds': [],
    'content': '\ntesting',
    'contributors': [TEST_WAVELET_DATA['creator'], 'robot@google.com'],
    'creator': TEST_WAVELET_DATA['creator'],
    'lastModifiedTime': TEST_WAVELET_DATA['lastModifiedTime'],
    'parentBlipId': None,
    'waveId': TEST_WAVELET_DATA['waveId'],
    'elements': {},
    'waveletId': TEST_WAVELET_DATA['waveletId'],
}


class TestWavelet(unittest.TestCase):
  """Tests the wavelet class."""

  def setUp(self):
    # Build a one-blip wavelet backed by a fresh operation queue.
    self.operation_queue = ops.OperationQueue()
    self.all_blips = {}
    self.blip = blip.Blip(TEST_BLIP_DATA, self.all_blips, self.operation_queue)
    self.all_blips[self.blip.blip_id] = self.blip
    self.wavelet = wavelet.Wavelet(TEST_WAVELET_DATA, self.all_blips, None,
                                   self.operation_queue)
    self.wavelet.robot_address = ROBOT_NAME

  def testWaveletProperties(self):
    wlt = self.wavelet
    self.assertEqual(TEST_WAVELET_DATA['creator'], wlt.creator)
    self.assertEqual(TEST_WAVELET_DATA['creationTime'], wlt.creation_time)
    self.assertEqual(TEST_WAVELET_DATA['lastModifiedTime'],
                     wlt.last_modified_time)
    self.assertEqual(len(TEST_WAVELET_DATA['participants']),
                     len(wlt.participants))
    self.assertTrue(TEST_WAVELET_DATA['participants'][0] in wlt.participants)
    self.assertEqual(TEST_WAVELET_DATA['rootBlipId'], wlt.root_blip.blip_id)
    self.assertEqual(TEST_WAVELET_DATA['title'], wlt.title)
    self.assertEqual(TEST_WAVELET_DATA['waveId'], wlt.wave_id)
    self.assertEqual(TEST_WAVELET_DATA['waveletId'], wlt.wavelet_id)
    self.assertEqual('test.com', wlt.domain)

  def testWaveletMethods(self):
    wlt = self.wavelet

    # Replying adds a blip; deleting the reply removes it again.
    reply = wlt.reply()
    self.assertEqual(2, len(wlt.blips))
    wlt.delete(reply)
    self.assertEqual(1, len(wlt.blips))

    # Data documents behave like a dict; setting a key to None deletes it.
    self.assertEqual(0, len(wlt.data_documents))
    self.wavelet.data_documents['key'] = 'value'
    self.assertTrue('key' in wlt.data_documents)
    self.assertEqual(1, len(wlt.data_documents))
    for key in wlt.data_documents:
      self.assertEqual(key, 'key')
    self.assertEqual(1, len(wlt.data_documents.keys()))
    self.wavelet.data_documents['key'] = None
    self.assertEqual(0, len(wlt.data_documents))

    num_participants = len(wlt.participants)
    wlt.proxy_for('proxy').reply()
    self.assertEqual(2, len(wlt.blips))
    # check that the new proxy for participant was added
    self.assertEqual(num_participants + 1, len(wlt.participants))

    # A robot address already carrying a proxy suffix adds nobody new.
    wlt._robot_address = ROBOT_NAME.replace('@', '+proxy@')
    wlt.proxy_for('proxy').reply()
    self.assertEqual(num_participants + 1, len(wlt.participants))
    self.assertEqual(3, len(wlt.blips))

  def testSetTitle(self):
    self.blip._content = '\nOld title\n\nContent'
    self.wavelet.title = 'New title \xd0\xb0\xd0\xb1\xd0\xb2'
    self.assertEqual(1, len(self.operation_queue))
    self.assertEqual('wavelet.setTitle',
                     self.operation_queue.serialize()[1]['method'])
    self.assertEqual(u'\nNew title \u0430\u0431\u0432\n\nContent',
                     self.blip._content)

  def testSetTitleAdjustRootBlipWithOneLineProperly(self):
    self.blip._content = '\nOld title'
    self.wavelet.title = 'New title'
    self.assertEqual(1, len(self.operation_queue))
    self.assertEqual('wavelet.setTitle',
                     self.operation_queue.serialize()[1]['method'])
    self.assertEqual('\nNew title\n', self.blip._content)

  def testSetTitleAdjustEmptyRootBlipProperly(self):
    self.blip._content = '\n'
    self.wavelet.title = 'New title'
    self.assertEqual(1, len(self.operation_queue))
    self.assertEqual('wavelet.setTitle',
                     self.operation_queue.serialize()[1]['method'])
    self.assertEqual('\nNew title\n', self.blip._content)

  def testTags(self):
    wlt = self.wavelet
    self.assertEqual(2, len(wlt.tags))
    wlt.tags.append('tag3')
    self.assertEqual(3, len(wlt.tags))
    # Appending a tag that is already present is a no-op.
    wlt.tags.append('tag3')
    self.assertEqual(3, len(wlt.tags))
    wlt.tags.remove('tag1')
    self.assertEqual(2, len(wlt.tags))
    self.assertEqual('tag2', wlt.tags[0])

  def testParticipantRoles(self):
    wlt = self.wavelet
    self.assertEqual(wavelet.Participants.ROLE_FULL,
                     wlt.participants.get_role(ROBOT_NAME))
    wlt.participants.set_role(ROBOT_NAME, wavelet.Participants.ROLE_READ_ONLY)
    self.assertEqual(wavelet.Participants.ROLE_READ_ONLY,
                     wlt.participants.get_role(ROBOT_NAME))

  def testSerialize(self):
    # Exercise one element of every major kind, then round-trip to JSON.
    self.blip.append(element.Gadget('http://test.com', {'a': 3}))
    self.wavelet.title = 'A wavelet title'
    self.blip.append(element.Image(
        url='http://www.google.com/logos/clickortreat1.gif',
        width=320, height=118))
    self.blip.append(element.Attachment(caption='fake', data='fake data'))
    self.blip.append(element.Line(line_type='li', indent='2'))
    self.blip.append('bulleted!')
    self.blip.append(element.Installer(
        'http://wave-skynet.appspot.com/public/extensions/areyouin/manifest.xml'))
    self.wavelet.proxy_for('proxy').reply().append('hi from douwe')
    inline_blip = self.blip.insert_inline_blip(5)
    inline_blip.append('hello again!')

    serialized = simplejson.dumps(self.wavelet.serialize())
    self.assertTrue(serialized.find('test.com') > 0)


if __name__ == '__main__':
  unittest.main()
[ [ 8, 0, 0.096, 0.0056, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.113, 0.0056, 0, 0.66, 0.0909, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.1243, 0.0056, 0, 0.66, ...
[ "\"\"\"Unit tests for the wavelet module.\"\"\"", "import unittest", "import blip", "import element", "import ops", "import wavelet", "import simplejson", "ROBOT_NAME = 'robot@appspot.com'", "TEST_WAVELET_DATA = {\n 'creator': ROBOT_NAME,\n 'creationTime': 100,\n 'lastModifiedTime': 101,\n ...
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Declares the api package."""
[ [ 8, 0, 1, 0.0588, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ] ]
[ "\"\"\"Declares the api package.\"\"\"" ]
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Contains various API-specific exception classes.

This module contains various specific exception classes that are raised by
the library back to the client.
"""


class Error(Exception):
  """Root of the library's exception hierarchy; catch this to get them all."""
[ [ 8, 0, 0.76, 0.2, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.98, 0.08, 0, 0.66, 1, 529, 0, 0, 0, 0, 645, 0, 0 ], [ 8, 1, 1, 0.04, 1, 0.64, 0, 0, 1...
[ "\"\"\"Contains various API-specific exception classes.\n\nThis module contains various specific exception classes that are raised by\nthe library back to the client.\n\"\"\"", "class Error(Exception):\n \"\"\"Base library error type.\"\"\"", " \"\"\"Base library error type.\"\"\"" ]
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines event types that are sent from the wave server. This module defines all of the event types currently supported by the wave server. Each event type is sub classed from Event and has its own properties depending on the type. """ class Context(object): """Specifies constants representing different context requests.""" #: Requests the root blip. ROOT = 'ROOT' #: Requests the parent blip of the event blip. PARENT = 'PARENT' #: Requests the siblings blip of the event blip. SIBLINGS = 'SIBLINGS' #: Requests the child blips of the event blip. CHILDREN = 'CHILDREN' #: Requests the event blip itself. SELF = 'SELF' #: Requests all of the blips of the event wavelet. ALL = 'ALL' class Event(object): """Object describing a single event. Attributes: modified_by: Participant id that caused this event. timestamp: Timestamp that this event occurred on the server. type: Type string of this event. properties: Dictionary of all extra properties. Typically the derrived event type should have these explicitly set as attributes, but experimental features might appear in properties before that. blip_id: The blip_id of the blip for blip related events or the root blip for wavelet related events. blip: If available, the blip with id equal to the events blip_id. proxying_for: If available, the proxyingFor id of the robot that caused the event. 
""" def __init__(self, json, wavelet): """Inits this event with JSON data. Args: json: JSON data from Wave server. """ self.modified_by = json.get('modifiedBy') self.timestamp = json.get('timestamp', 0) self.type = json.get('type') self.raw_data = json self.properties = json.get('properties', {}) self.blip_id = self.properties.get('blipId') self.blip = wavelet.blips.get(self.blip_id) self.proxying_for = json.get('proxyingFor') class WaveletBlipCreated(Event): """Event triggered when a new blip is created. Attributes: new_blip_id: The id of the newly created blip. new_blip: If in context, the actual new blip. """ type = 'WAVELET_BLIP_CREATED' def __init__(self, json, wavelet): super(WaveletBlipCreated, self).__init__(json, wavelet) self.new_blip_id = self.properties['newBlipId'] self.new_blip = wavelet.blips.get(self.new_blip_id) class WaveletBlipRemoved(Event): """Event triggered when a new blip is removed. Attributes: removed_blip_id: the id of the removed blip removed_blip: if in context, the removed blip """ type = 'WAVELET_BLIP_REMOVED' def __init__(self, json, wavelet): super(WaveletBlipRemoved, self).__init__(json, wavelet) self.removed_blip_id = self.properties['removedBlipId'] self.removed_blip = wavelet.blips.get(self.removed_blip_id) class WaveletParticipantsChanged(Event): """Event triggered when the participants on a wave change. Attributes: participants_added: List of participants added. participants_removed: List of participants removed. 
""" type = 'WAVELET_PARTICIPANTS_CHANGED' def __init__(self, json, wavelet): super(WaveletParticipantsChanged, self).__init__(json, wavelet) self.participants_added = self.properties['participantsAdded'] self.participants_removed = self.properties['participantsRemoved'] class WaveletSelfAdded(Event): """Event triggered when the robot is added to the wavelet.""" type = 'WAVELET_SELF_ADDED' class WaveletSelfRemoved(Event): """Event triggered when the robot is removed from the wavelet.""" type = 'WAVELET_SELF_REMOVED' class WaveletTitleChanged(Event): """Event triggered when the title of the wavelet has changed. Attributes: title: The new title. """ type = 'WAVELET_TITLE_CHANGED' def __init__(self, json, wavelet): super(WaveletTitleChanged, self).__init__(json, wavelet) self.title = self.properties['title'] class BlipContributorsChanged(Event): """Event triggered when the contributors to this blip change. Attributes: contributors_added: List of contributors that were added. contributors_removed: List of contributors that were removed. """ type = 'BLIP_CONTRIBUTORS_CHANGED' def __init__(self, json, wavelet): super(BlipContributorsChanged, self).__init__(json, wavelet) self.contibutors_added = self.properties['contributorsAdded'] self.contibutors_removed = self.properties['contributorsRemoved'] class BlipSubmitted(Event): """Event triggered when a blip is submitted.""" type = 'BLIP_SUBMITTED' class DocumentChanged(Event): """Event triggered when a document is changed. This event is fired after any changes in the document and should be used carefully to keep the amount of traffic to the robot reasonable. Use filters where appropriate. """ type = 'DOCUMENT_CHANGED' class FormButtonClicked(Event): """Event triggered when a form button is clicked. Attributes: button_name: The name of the button that was clicked. 
""" type = 'FORM_BUTTON_CLICKED' def __init__(self, json, wavelet): super(FormButtonClicked, self).__init__(json, wavelet) self.button_name = self.properties['buttonName'] class GadgetStateChanged(Event): """Event triggered when the state of a gadget changes. Attributes: index: The index of the gadget that changed in the document. old_state: The old state of the gadget. """ type = 'GADGET_STATE_CHANGED' def __init__(self, json, wavelet): super(GadgetStateChanged, self).__init__(json, wavelet) self.index = self.properties['index'] self.old_state = self.properties['oldState'] class AnnotatedTextChanged(Event): """Event triggered when text with an annotation has changed. This is mainly useful in combination with a filter on the name of the annotation. Attributes: name: The name of the annotation. value: The value of the annotation that changed. """ type = 'ANNOTATED_TEXT_CHANGED' def __init__(self, json, wavelet): super(AnnotatedTextChanged, self).__init__(json, wavelet) self.name = self.properties['name'] self.value = self.properties.get('value') class OperationError(Event): """Triggered when an event on the server occurred. Attributes: operation_id: The operation id of the failing operation. error_message: More information as to what went wrong. """ type = 'OPERATION_ERROR' def __init__(self, json, wavelet): super(OperationError, self).__init__(json, wavelet) self.operation_id = self.properties['operationId'] self.error_message = self.properties['message'] class WaveletCreated(Event): """Triggered when a new wavelet is created. This event is only triggered if the robot creates a new wavelet and can be used to initialize the newly created wave. wavelets created by other participants remain invisible to the robot until the robot is added to the wave in which case WaveletSelfAdded is triggered. Attributes: message: Whatever string was passed into the new_wave call as message (if any). 
""" type = 'WAVELET_CREATED' def __init__(self, json, wavelet): super(WaveletCreated, self).__init__(json, wavelet) self.message = self.properties['message'] class WaveletFetched(Event): """Triggered when a new wavelet is fetched. This event is triggered after a robot requests to see another wavelet. The robot has to be on the other wavelet already. Attributes: message: Whatever string was passed into the new_wave call as message (if any). """ type = 'WAVELET_FETCHED' def __init__(self, json, wavelet): super(WaveletFetched, self).__init__(json, wavelet) self.message = self.properties['message'] class WaveletTagsChanged(Event): """Event triggered when the tags on a wavelet change.""" type = 'WAVELET_TAGS_CHANGED' def __init__(self, json, wavelet): super(WaveletTagsChanged, self).__init__(json, wavelet) def is_event(cls): """Returns whether the passed class is an event.""" try: if not issubclass(cls, Event): return False return hasattr(cls, 'type') except TypeError: return False ALL = [item for item in globals().copy().values() if is_event(item)]
[ [ 8, 0, 0.0648, 0.0199, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.1063, 0.0498, 0, 0.66, 0.05, 560, 0, 0, 0, 0, 186, 0, 0 ], [ 8, 1, 0.0864, 0.0033, 1, 0.31, ...
[ "\"\"\"Defines event types that are sent from the wave server.\n\nThis module defines all of the event types currently supported by the wave\nserver. Each event type is sub classed from Event and has its own\nproperties depending on the type.\n\"\"\"", "class Context(object):\n \"\"\"Specifies constants represen...
#! /usr/bin/python # -*- coding: UTF-8 -*- from notifiy.robot import create_robot if __name__ == '__main__': create_robot()
[ [ 1, 0, 0.5, 0.125, 0, 0.66, 0, 413, 0, 1, 0, 0, 413, 0, 0 ], [ 4, 0, 0.9375, 0.25, 0, 0.66, 1, 0, 0, 0, 0, 0, 0, 0, 1 ], [ 8, 1, 1, 0.125, 1, 0.83, 0, 641,...
[ "from notifiy.robot import create_robot", "if __name__ == '__main__':\n create_robot()", " create_robot()" ]
#!/usr/bin/env python # -*- coding: UTF-8 -*- from google.appengine.ext import webapp from google.appengine.ext.webapp.util import run_wsgi_app from notifiy.home import Home from notifiy.proc import Process from notifiy.proc_phone import PhoneProcess from notifiy.receive_email import ReceiveEmail if __name__ == "__main__": run_wsgi_app(webapp.WSGIApplication([ ('/', Home), ('/proc/.*', Process), ('/phone/.*', PhoneProcess), ('/_ah/mail/.+', ReceiveEmail) ]))
[ [ 1, 0, 0.2353, 0.0588, 0, 0.66, 0, 167, 0, 1, 0, 0, 167, 0, 0 ], [ 1, 0, 0.2941, 0.0588, 0, 0.66, 0.1667, 327, 0, 1, 0, 0, 327, 0, 0 ], [ 1, 0, 0.4118, 0.0588, 0, ...
[ "from google.appengine.ext import webapp", "from google.appengine.ext.webapp.util import run_wsgi_app", "from notifiy.home import Home", "from notifiy.proc import Process", "from notifiy.proc_phone import PhoneProcess", "from notifiy.receive_email import ReceiveEmail", "if __name__ == \"__main__\":\n ...
#!/usr/bin/env python # -*- coding: UTF-8 -*- from google.appengine.ext import webapp from google.appengine.ext.webapp.util import run_wsgi_app class Index(webapp.RequestHandler): def get(self): self.redirect('%s/index.html' % self.request.path) if __name__ == "__main__": run_wsgi_app(webapp.WSGIApplication([ ('.*', Index), ]))
[ [ 1, 0, 0.25, 0.0625, 0, 0.66, 0, 167, 0, 1, 0, 0, 167, 0, 0 ], [ 1, 0, 0.3125, 0.0625, 0, 0.66, 0.3333, 327, 0, 1, 0, 0, 327, 0, 0 ], [ 3, 0, 0.5938, 0.25, 0, 0.66...
[ "from google.appengine.ext import webapp", "from google.appengine.ext.webapp.util import run_wsgi_app", "class Index(webapp.RequestHandler):\n\n def get(self):\n self.redirect('%s/index.html' % self.request.path)", " def get(self):\n self.redirect('%s/index.html' % self.request.path)", "...
'''
Module which brings history information about files from Mercurial.

@author: Rodrigo Damazio
'''

import re
import subprocess

# Matches the leading 12-hex-digit changeset hash in "hg annotate -c" output.
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')


def _GetOutputLines(args):
  '''
  Runs an external process and returns its output as a list of lines.

  @param args: the arguments to run
  '''
  process = subprocess.Popen(args,
                             stdout=subprocess.PIPE,
                             universal_newlines=True,
                             shell=False)
  output = process.communicate()[0]
  return output.splitlines()


def FillMercurialRevisions(filename, parsed_file):
  '''
  Fills the revs attribute of all strings in the given parsed file with a
  list of revisions that touched the lines corresponding to that string.

  @param filename: the name of the file to get history for
  @param parsed_file: the parsed file to modify
  @raise RuntimeError: if "hg annotate" produces an unparseable line
  '''
  # Take output of hg annotate to get revision of each line
  output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])

  # Create a map of line -> revision (key is list index, line 0 doesn't exist)
  line_revs = ['dummy']
  for line in output_lines:
    rev_match = REVISION_REGEX.match(line)
    if not rev_match:
      # Fixed: this used to raise a plain string, which is not a legal
      # exception type (it raises TypeError itself on Python >= 2.6).
      raise RuntimeError('Unexpected line of output from hg: %s' % line)
    line_revs.append(rev_match.group('hash'))

  # Loop variable renamed from 'str', which shadowed the builtin.
  for string_info in parsed_file.itervalues():
    # Get the lines that correspond to each string
    start_line = string_info['startLine']
    end_line = string_info['endLine']

    # Get the revisions that touched those lines
    revs = [line_revs[line_number]
            for line_number in range(start_line, end_line + 1)]

    # Merge with any revisions that were already there
    # (for explict revision specification)
    if 'revs' in string_info:
      revs += string_info['revs']

    # Assign the revisions to the string
    string_info['revs'] = frozenset(revs)


def DoesRevisionSuperceed(filename, rev1, rev2):
  '''
  Tells whether a revision superceeds another.
  This essentially means that the older revision is an ancestor of the newer
  one.
  This also returns True if the two revisions are the same.

  @param rev1: the revision that may be superceeding the other
  @param rev2: the revision that may be superceeded
  @return: True if rev1 superceeds rev2 or they're the same
  '''
  # Fast path: identical revisions need no repository query.
  if rev1 == rev2:
    return True

  # TODO: Add filename
  args = ['hg', 'log',
          '-r', 'ancestors(%s)' % rev1,
          '--template', '{node|short}\n',
          filename]
  output_lines = _GetOutputLines(args)
  return rev2 in output_lines


def NewestRevision(filename, rev1, rev2):
  '''
  Returns which of two revisions is closest to the head of the repository.
  If none of them is the ancestor of the other, then we return either one.

  @param rev1: the first revision
  @param rev2: the second revision
  '''
  if DoesRevisionSuperceed(filename, rev1, rev2):
    return rev1
  return rev2
[ [ 8, 0, 0.0319, 0.0532, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0745, 0.0106, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0851, 0.0106, 0, 0.66...
[ "'''\nModule which brings history information about files from Mercurial.\n\n@author: Rodrigo Damazio\n'''", "import re", "import subprocess", "REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')", "def _GetOutputLines(args):\n '''\n Runs an external process and returns its output as a list of lines...
#!/usr/bin/python ''' Entry point for My Tracks i18n tool. @author: Rodrigo Damazio ''' import mytracks.files import mytracks.translate import mytracks.validate import sys def Usage(): print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0] print 'Commands are:' print ' cleanup' print ' translate' print ' validate' sys.exit(1) def Translate(languages): ''' Asks the user to interactively translate any missing or oudated strings from the files for the given languages. @param languages: the languages to translate ''' validator = mytracks.validate.Validator(languages) validator.Validate() missing = validator.missing_in_lang() outdated = validator.outdated_in_lang() for lang in languages: untranslated = missing[lang] + outdated[lang] if len(untranslated) == 0: continue translator = mytracks.translate.Translator(lang) translator.Translate(untranslated) def Validate(languages): ''' Computes and displays errors in the string files for the given languages. @param languages: the languages to compute for ''' validator = mytracks.validate.Validator(languages) validator.Validate() error_count = 0 if (validator.valid()): print 'All files OK' else: for lang, missing in validator.missing_in_master().iteritems(): print 'Missing in master, present in %s: %s:' % (lang, str(missing)) error_count = error_count + len(missing) for lang, missing in validator.missing_in_lang().iteritems(): print 'Missing in %s, present in master: %s:' % (lang, str(missing)) error_count = error_count + len(missing) for lang, outdated in validator.outdated_in_lang().iteritems(): print 'Outdated in %s: %s:' % (lang, str(outdated)) error_count = error_count + len(outdated) return error_count if __name__ == '__main__': argv = sys.argv argc = len(argv) if argc < 2: Usage() languages = mytracks.files.GetAllLanguageFiles() if argc == 3: langs = set(argv[2:]) if not langs.issubset(languages): raise 'Language(s) not found' # Filter just to the languages specified languages = dict((lang, lang_file) for lang, 
lang_file in languages.iteritems() if lang in langs or lang == 'en' ) cmd = argv[1] if cmd == 'translate': Translate(languages) elif cmd == 'validate': error_count = Validate(languages) else: Usage() error_count = 0 print '%d errors found.' % error_count
[ [ 8, 0, 0.0417, 0.0521, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0833, 0.0104, 0, 0.66, 0.125, 640, 0, 1, 0, 0, 640, 0, 0 ], [ 1, 0, 0.0938, 0.0104, 0, 0.66,...
[ "'''\nEntry point for My Tracks i18n tool.\n\n@author: Rodrigo Damazio\n'''", "import mytracks.files", "import mytracks.translate", "import mytracks.validate", "import sys", "def Usage():\n print('Usage: %s <command> [<language> ...]\\n' % sys.argv[0])\n print('Commands are:')\n print(' cleanup')\n p...
'''
Module which prompts the user for translations and saves them.

TODO: implement

@author: Rodrigo Damazio
'''


class Translator(object):
  '''
  Interactive translator for a single target language.

  TODO: implement -- Translate currently only prints the string names it
  was asked to translate.
  '''

  def __init__(self, language):
    '''
    Constructor

    @param language: the language code this translator will produce
        translations for (presumably an Android values-* suffix -- confirm
        against mytracks.files.GetAllLanguageFiles)
    '''
    # Target language code, kept for use by Translate().
    self._language = language

  def Translate(self, string_names):
    # Placeholder implementation: dump the names needing translation.
    print string_names
[ [ 8, 0, 0.1905, 0.3333, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.7143, 0.619, 0, 0.66, 1, 229, 0, 2, 0, 0, 186, 0, 1 ], [ 8, 1, 0.5238, 0.1429, 1, 0.33, ...
[ "'''\nModule which prompts the user for translations and saves them.\n\nTODO: implement\n\n@author: Rodrigo Damazio\n'''", "class Translator(object):\n '''\n classdocs\n '''\n\n def __init__(self, language):\n '''\n Constructor", " '''\n classdocs\n '''", " def __init__(self, language):\n '''...
''' Module which compares languague files to the master file and detects issues. @author: Rodrigo Damazio ''' import os from mytracks.parser import StringsParser import mytracks.history class Validator(object): def __init__(self, languages): ''' Builds a strings file validator. Params: @param languages: a dictionary mapping each language to its corresponding directory ''' self._langs = {} self._master = None self._language_paths = languages parser = StringsParser() for lang, lang_dir in languages.iteritems(): filename = os.path.join(lang_dir, 'strings.xml') parsed_file = parser.Parse(filename) mytracks.history.FillMercurialRevisions(filename, parsed_file) if lang == 'en': self._master = parsed_file else: self._langs[lang] = parsed_file self._Reset() def Validate(self): ''' Computes whether all the data in the files for the given languages is valid. ''' self._Reset() self._ValidateMissingKeys() self._ValidateOutdatedKeys() def valid(self): return (len(self._missing_in_master) == 0 and len(self._missing_in_lang) == 0 and len(self._outdated_in_lang) == 0) def missing_in_master(self): return self._missing_in_master def missing_in_lang(self): return self._missing_in_lang def outdated_in_lang(self): return self._outdated_in_lang def _Reset(self): # These are maps from language to string name list self._missing_in_master = {} self._missing_in_lang = {} self._outdated_in_lang = {} def _ValidateMissingKeys(self): ''' Computes whether there are missing keys on either side. ''' master_keys = frozenset(self._master.iterkeys()) for lang, file in self._langs.iteritems(): keys = frozenset(file.iterkeys()) missing_in_master = keys - master_keys missing_in_lang = master_keys - keys if len(missing_in_master) > 0: self._missing_in_master[lang] = missing_in_master if len(missing_in_lang) > 0: self._missing_in_lang[lang] = missing_in_lang def _ValidateOutdatedKeys(self): ''' Computers whether any of the language keys are outdated with relation to the master keys. 
''' for lang, file in self._langs.iteritems(): outdated = [] for key, str in file.iteritems(): # Get all revisions that touched master and language files for this # string. master_str = self._master[key] master_revs = master_str['revs'] lang_revs = str['revs'] if not master_revs or not lang_revs: print 'WARNING: No revision for %s in %s' % (key, lang) continue master_file = os.path.join(self._language_paths['en'], 'strings.xml') lang_file = os.path.join(self._language_paths[lang], 'strings.xml') # Assume that the repository has a single head (TODO: check that), # and as such there is always one revision which superceeds all others. master_rev = reduce( lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2), master_revs) lang_rev = reduce( lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2), lang_revs) # If the master version is newer than the lang version if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev): outdated.append(key) if len(outdated) > 0: self._outdated_in_lang[lang] = outdated
[ [ 8, 0, 0.0304, 0.0522, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66, 0.25, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0783, 0.0087, 0, 0.66, ...
[ "'''\nModule which compares languague files to the master file and detects\nissues.\n\n@author: Rodrigo Damazio\n'''", "import os", "from mytracks.parser import StringsParser", "import mytracks.history", "class Validator(object):\n\n def __init__(self, languages):\n '''\n Builds a strings file valida...
'''
Module for dealing with resource files (but not their contents).

@author: Rodrigo Damazio
'''

import os.path
import re
from glob import glob

MYTRACKS_RES_DIR = 'MyTracks/res'
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'


def GetMyTracksDir():
  '''
  Returns the directory in which the MyTracks directory is located.

  Walks up from the current working directory until a directory containing
  MyTracks/res is found.

  @raise RuntimeError: when no enclosing My Tracks project exists
  '''
  path = os.getcwd()
  while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
    parent = os.path.split(path)[0]
    if parent == path:
      # Reached the filesystem root.  Fixed: the old check compared against
      # the literal '/', which both raised a plain string (an illegal
      # exception type) and looped forever on non-POSIX roots like 'C:\\'.
      raise RuntimeError('Not in My Tracks project')
    path = parent
  return path


def GetAllLanguageFiles():
  '''
  Returns a mapping from all found languages to their respective directories.

  The master ('en') strings live in the plain 'values' directory; every
  'values-*' directory contributes one translated language.

  @raise RuntimeError: if no translations or no master directory are found
  '''
  mytracks_path = GetMyTracksDir()
  res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
  language_dirs = glob(res_dir)
  master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR,
                            ANDROID_MASTER_VALUES)

  # Fixed: both failure paths used to raise plain strings, which are not
  # legal exception types.
  if len(language_dirs) == 0:
    raise RuntimeError('No languages found!')
  if not os.path.isdir(master_dir):
    raise RuntimeError('Couldn\'t find master file')

  # Variable renamed from 'dir', which shadowed the builtin.
  language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0],
                      lang_dir)
                     for lang_dir in language_dirs]
  language_tuples.append(('en', master_dir))
  return dict(language_tuples)
[ [ 8, 0, 0.0667, 0.1111, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1333, 0.0222, 0, 0.66, 0.125, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.1556, 0.0222, 0, 0.66, ...
[ "'''\nModule for dealing with resource files (but not their contents).\n\n@author: Rodrigo Damazio\n'''", "import os.path", "from glob import glob", "import re", "MYTRACKS_RES_DIR = 'MyTracks/res'", "ANDROID_MASTER_VALUES = 'values'", "ANDROID_VALUES_MASK = 'values-*'", "def GetMyTracksDir():\n '''\n...
'''
Module which parses a string XML file.

@author: Rodrigo Damazio
'''

import re
from xml.parsers.expat import ParserCreate


class StringsParser(object):
  '''
  Parser for string XML files.

  This object is not thread-safe and should be used for parsing a single file at
  a time, only.
  '''

  def Parse(self, file):
    '''
    Parses the given file and returns a dictionary mapping keys to an object
    with attributes for that key, such as the value, start/end line and
    explicit revisions.

    In addition to the standard XML format of the strings file, this parser
    supports an annotation inside comments, in one of these formats:
    <!-- KEEP_PARENT name="bla" -->
    <!-- KEEP_PARENT name="bla" rev="123456789012" -->
    Such an annotation indicates that we're explicitly inheriting form the
    master file (and the optional revision says that this decision is
    compatible with the master file up to that revision).

    @param file: the name of the file to parse
    '''
    self._Reset()

    # Unfortunately expat is the only parser that will give us line numbers
    self._xml_parser = ParserCreate()
    self._xml_parser.StartElementHandler = self._StartElementHandler
    self._xml_parser.EndElementHandler = self._EndElementHandler
    self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
    self._xml_parser.CommentHandler = self._CommentHandler

    # Fixed: open in binary mode (expat consumes raw bytes) and close the
    # file even when parsing raises -- the old code leaked the handle on
    # malformed input.
    file_obj = open(file, 'rb')
    try:
      self._xml_parser.ParseFile(file_obj)
    finally:
      file_obj.close()

    return self._all_strings

  def _Reset(self):
    # State for the <string> element currently being read, plus the result
    # map of string name -> info dict.
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None
    self._all_strings = {}

  def _StartElementHandler(self, name, attrs):
    # Only named <string> elements are of interest.
    if name != 'string':
      return
    if 'name' not in attrs:
      return

    # <string> elements do not nest; there must be no element in flight.
    assert not self._currentString
    assert not self._currentStringName

    self._currentString = {
        'startLine' : self._xml_parser.CurrentLineNumber,
    }
    # An explicit rev attribute records a revision for this string.
    if 'rev' in attrs:
      self._currentString['revs'] = [attrs['rev']]
    self._currentStringName = attrs['name']
    self._currentStringValue = ''

  def _EndElementHandler(self, name):
    if name != 'string':
      return

    assert self._currentString
    assert self._currentStringName

    self._currentString['value'] = self._currentStringValue
    self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
    self._all_strings[self._currentStringName] = self._currentString

    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None

  def _CharacterDataHandler(self, data):
    if not self._currentString:
      return
    # Text may arrive in several chunks; accumulate it.
    self._currentStringValue += data

  # Matches the KEEP_PARENT annotation documented in Parse().
  _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
      r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
      r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
      re.MULTILINE | re.DOTALL)

  def _CommentHandler(self, data):
    keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
    if not keep_parent_match:
      return

    name = keep_parent_match.group('name')
    self._all_strings[name] = {
        'keepParent' : True,
        'startLine' : self._xml_parser.CurrentLineNumber,
        'endLine' : self._xml_parser.CurrentLineNumber }
    rev = keep_parent_match.group('rev')
    if rev:
      self._all_strings[name]['revs'] = [rev]
[ [ 8, 0, 0.0261, 0.0435, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0609, 0.0087, 0, 0.66, 0.3333, 573, 0, 1, 0, 0, 573, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66...
[ "'''\nModule which parses a string XML file.\n\n@author: Rodrigo Damazio\n'''", "from xml.parsers.expat import ParserCreate", "import re", "class StringsParser(object):\n '''\n Parser for string XML files.\n\n This object is not thread-safe and should be used for parsing a single file at\n a time, only.\n...
# # Generate and deploy the jar and associated files to the Sonatype maven repository. # import os, re, tempfile, subprocess #, sys, datetime, zipfile # Location of the source file that defines the current version VERSION_FILE = '../src/com/caverock/androidsvg/SVG.java' # Version regex VERSION_RE = '\sVERSION\s*=\s*"([\d.]+)"' # Source pom file ORIG_POM_FILE = 'src-pom.xml' # Regex for finding the place in the pom file to insert the version number POM_VERSION_RE = '{{VERSION}}' # The jar file to be deployed JAR_FILE = '../bin/androidsvg.jar' # The dummy sources and javadoc jars SOURCES_JAR_FILE = 'androidsvg-sources.jar' JAVADOC_JAR_FILE = 'androidsvg-javadoc.jar' def main(): # Get the current version number of the library libraryVersion = get_current_version() go = raw_input('\nDo maven deploy for version '+libraryVersion+'? (y/N): ') if not go in ['Y','y']: exit() # Get GPG passphrase #passphrase = raw_input('GPG passphrase: ') #if passphrase == '': # print "Exiting: need passphrase." # exit() # Create a temporary file to hold the generated pom file print 'Creating POM file for this version...' tempPomFile = tempfile.NamedTemporaryFile(suffix='.pom.xml', delete=False) #print tempPomFile.name # Write out a new pom file with the version number set to the latest version srcPomFile = read(ORIG_POM_FILE) tempPomFile.write(re.sub(POM_VERSION_RE, libraryVersion, srcPomFile)) tempPomFile.close() # Sign and deploy the artifact print '\nSigning and deploying artifact...' basecmd = 'mvn gpg:sign-and-deploy-file' basecmd += ' -DpomFile=' + tempPomFile.name basecmd += ' -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/' basecmd += ' -DrepositoryId=sonatype-nexus-staging' #basecmd += ' -Dpassphrase=' + passphrase cmd = basecmd cmd += ' -Dfile=' + os.path.realpath(JAR_FILE) print cmd os.system(cmd) # Sign and deploy the dummy sources print '\nSigning and deploying sources jar...' 
cmd = basecmd cmd += ' -Dfile=' + os.path.realpath(SOURCES_JAR_FILE) cmd += ' -Dclassifier=sources' print cmd os.system(cmd) # Sign and deploy the dummy javadoc print '\nSigning and deploying javadoc jar...' cmd = basecmd cmd += ' -Dfile=' + os.path.realpath(JAVADOC_JAR_FILE) cmd += ' -Dclassifier=javadoc' print cmd os.system(cmd) # Done print '\nDone!' def read(src): file = open(os.path.realpath(src), "rb") str = file.read() file.close() return str def get_current_version(): versionFile = read(VERSION_FILE) m = re.search(VERSION_RE, versionFile) if (m): return m.group(1) else: return "" def error(msg): print "ERROR: "+ msg exit() if __name__ == "__main__": main()
[ [ 1, 0, 0.0427, 0.0085, 0, 0.66, 0, 688, 0, 4, 0, 0, 688, 0, 0 ], [ 14, 0, 0.0684, 0.0085, 0, 0.66, 0.0833, 837, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.094, 0.0085, 0, 0...
[ "import os, re, tempfile, subprocess #, sys, datetime, zipfile", "VERSION_FILE = '../src/com/caverock/androidsvg/SVG.java'", "VERSION_RE = '\\sVERSION\\s*=\\s*\"([\\d.]+)\"'", "ORIG_POM_FILE = 'src-pom.xml'", "POM_VERSION_RE = '{{VERSION}}'", "JAR_FILE = '../bin/androidsvg.jar'", "SOURCES_JAR_FILE = '...
import Download pagelist=['http://www.economist.com/'] print("Starting ....") crawler = Download.crawler('') crawler.crawl(pagelist)
[ [ 1, 0, 0.1429, 0.1429, 0, 0.66, 0, 175, 0, 1, 0, 0, 175, 0, 0 ], [ 14, 0, 0.4286, 0.1429, 0, 0.66, 0.25, 340, 0, 0, 0, 0, 0, 5, 0 ], [ 8, 0, 0.5714, 0.1429, 0, 0.6...
[ "import Download", "pagelist=['http://www.economist.com/']", "print(\"Starting ....\")", "crawler = Download.crawler('')", "crawler.crawl(pagelist)" ]
import urllib import re urlArg = 'http://www.yahoo.com' filehandle = urllib.urlopen(urlArg) for lines in filehandle.readlines(): m = re.search('http\://[\w\./]+/\w+',lines) if m: print m.group(0) filehandle.close()
[ [ 1, 0, 0.0667, 0.0667, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.1333, 0.0667, 0, 0.66, 0.2, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 14, 0, 0.2667, 0.0667, 0, 0....
[ "import urllib", "import re", "urlArg = 'http://www.yahoo.com'", "filehandle = urllib.urlopen(urlArg)", "for lines in filehandle.readlines():\n\tm = re.search('http\\://[\\w\\./]+/\\w+',lines)\n\tif m:\n\t\tprint(m.group(0))", "\tm = re.search('http\\://[\\w\\./]+/\\w+',lines)", "\tif m:\n\t\tprint(m.gr...
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Main program for Rietveld. This is also a template for running a Django app under Google App Engine, especially when using a newer version of Django than provided in the App Engine standard library. The site-specific code is all in other files: urls.py, models.py, views.py, settings.py. """ # Standard Python imports. import os import sys import logging # Log a message each time this module get loaded. logging.info('Loading %s, app version = %s', __name__, os.getenv('CURRENT_VERSION_ID')) import appengine_config # AppEngine imports. from google.appengine.ext.webapp import util # Import webapp.template. This makes most Django setup issues go away. from google.appengine.ext.webapp import template # Import various parts of Django. import django.core.handlers.wsgi import django.core.signals import django.db import django.dispatch.dispatcher import django.forms def log_exception(*args, **kwds): """Django signal handler to log an exception.""" cls, err = sys.exc_info()[:2] logging.exception('Exception in request: %s: %s', cls.__name__, err) # Log all exceptions detected by Django. django.core.signals.got_request_exception.connect(log_exception) # Unregister Django's default rollback event handler. django.core.signals.got_request_exception.disconnect( django.db._rollback_on_exception) # Create a Django application for WSGI. 
application = django.core.handlers.wsgi.WSGIHandler() def real_main(): """Main program.""" # Run the WSGI CGI handler with that application. util.run_wsgi_app(application) def profile_main(): """Main program for profiling.""" import cProfile import pstats import StringIO prof = cProfile.Profile() prof = prof.runctx('real_main()', globals(), locals()) stream = StringIO.StringIO() stats = pstats.Stats(prof, stream=stream) # stats.strip_dirs() # Don't; too many modules are named __init__.py. stats.sort_stats('time') # 'time', 'cumulative' or 'calls' stats.print_stats() # Optional arg: how many to print # The rest is optional. # stats.print_callees() # stats.print_callers() print '\n<hr>' print '<h1>Profile</h1>' print '<pre>' print stream.getvalue()[:1000000] print '</pre>' # Set this to profile_main to enable profiling. main = real_main if __name__ == '__main__': main()
[ [ 8, 0, 0.1881, 0.0891, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2574, 0.0099, 0, 0.66, 0.05, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.2673, 0.0099, 0, 0.66, ...
[ "\"\"\"Main program for Rietveld.\n\nThis is also a template for running a Django app under Google App\nEngine, especially when using a newer version of Django than provided\nin the App Engine standard library.\n\nThe site-specific code is all in other files: urls.py, models.py,\nviews.py, settings.py.", "import ...
# Removes duplicate nicknames (issue99). # # To run this script: # - Make sure App Engine library (incl. yaml) is in PYTHONPATH. # - Make sure that the remote API is included in app.yaml. # - Run "tools/appengine_console.py APP_ID". # - Import this module. # - update_accounts.run() updates accounts. # - Use the other two functions to fetch accounts or find duplicates # without any changes to the datastore. from google.appengine.ext import db from codereview import models def fetch_accounts(): query = models.Account.all() accounts = {} results = query.fetch(100) while results: last = None for account in results: if account.lower_nickname in accounts: accounts[account.lower_nickname].append(account) else: accounts[account.lower_nickname] = [account] last = account if last is None: break results = models.Account.all().filter('__key__ >', last.key()).fetch(100) return accounts def find_duplicates(accounts): tbd = [] while accounts: _, entries = accounts.popitem() if len(entries) > 1: # update accounts, except the fist: it's the lucky one for num, account in enumerate(entries[1:]): account.nickname = '%s%d' % (account.nickname, num+1) account.lower_nickname = account.nickname.lower() account.fresh = True # display "change nickname..." tbd.append(account) return tbd def run(): accounts = fetch_accounts() print '%d accounts fetched' % len(accounts) tbd = find_duplicates(accounts) print 'Updating %d accounts' % len(tbd) db.put(tbd) print 'Updated accounts:' for account in tbd: print ' %s' % account.email
[ [ 1, 0, 0.2097, 0.0161, 0, 0.66, 0, 167, 0, 1, 0, 0, 167, 0, 0 ], [ 1, 0, 0.2419, 0.0161, 0, 0.66, 0.25, 844, 0, 1, 0, 0, 844, 0, 0 ], [ 2, 0, 0.4194, 0.2742, 0, 0....
[ "from google.appengine.ext import db", "from codereview import models", "def fetch_accounts():\n query = models.Account.all()\n accounts = {}\n results = query.fetch(100)\n while results:\n last = None\n for account in results:\n if account.lower_nickname in accounts:", " ...
"""Configuration.""" import logging import os import re from google.appengine.ext.appstats import recording logging.info('Loading %s from %s', __name__, __file__) # Custom webapp middleware to add Appstats. def webapp_add_wsgi_middleware(app): app = recording.appstats_wsgi_middleware(app) return app # Custom Appstats path normalization. def appstats_normalize_path(path): if path.startswith('/user/'): return '/user/X' if path.startswith('/user_popup/'): return '/user_popup/X' if '/diff/' in path: return '/X/diff/...' if '/diff2/' in path: return '/X/diff2/...' if '/patch/' in path: return '/X/patch/...' if path.startswith('/rss/'): i = path.find('/', 5) if i > 0: return path[:i] + '/X' return re.sub(r'\d+', 'X', path) # Segregate Appstats by runtime (python vs. python27). appstats_KEY_NAMESPACE = '__appstats_%s__' % os.getenv('APPENGINE_RUNTIME') # Django 1.2+ requires DJANGO_SETTINGS_MODULE environment variable to be set # http://code.google.com/appengine/docs/python/tools/libraries.html#Django os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' # NOTE: All "main" scripts must import webapp.template before django.
[ [ 8, 0, 0.0244, 0.0244, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0732, 0.0244, 0, 0.66, 0.1111, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.0976, 0.0244, 0, 0.66...
[ "\"\"\"Configuration.\"\"\"", "import logging", "import os", "import re", "from google.appengine.ext.appstats import recording", "logging.info('Loading %s from %s', __name__, __file__)", "def webapp_add_wsgi_middleware(app):\n app = recording.appstats_wsgi_middleware(app)\n return app", " app = rec...
# Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test utils.""" import os from google.appengine.ext import testbed from django.test import TestCase as _TestCase FILES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files') class TestCase(_TestCase): """Customized Django TestCase. This class disables the setup of Django features that are not available on App Engine (e.g. fixture loading). And it initializes the Testbad class provided by the App Engine SDK. """ def _fixture_setup(self): # defined in django.test.TestCase pass def _fixture_teardown(self): # defined in django.test.TestCase pass def setUp(self): super(TestCase, self).setUp() self.testbed = testbed.Testbed() self.testbed.activate() self.testbed.init_datastore_v3_stub() self.testbed.init_user_stub() def tearDown(self): self.testbed.deactivate() super(TestCase, self).tearDown() def login(self, email): """Logs in a user identified by email.""" os.environ['USER_EMAIL'] = email def logout(self): """Logs the user out.""" os.environ['USER_EMAIL'] = '' def load_file(fname): """Read file and return it's content.""" return open(os.path.join(FILES_DIR, fname)).read()
[ [ 8, 0, 0.2381, 0.0159, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2698, 0.0159, 0, 0.66, 0.1667, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.3016, 0.0159, 0, 0.66...
[ "\"\"\"Test utils.\"\"\"", "import os", "from google.appengine.ext import testbed", "from django.test import TestCase as _TestCase", "FILES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'files')", "class TestCase(_TestCase):\n \"\"\"Customized Django TestCase.\n\n This class disables th...
#!/usr/bin/env python # Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import code import getpass import logging import optparse import os import re import sys ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..') LIB = os.path.join(ROOT, '..', 'google_appengine', 'lib') sys.path.insert(0, os.path.join(ROOT, '..', 'google_appengine')) sys.path.append(os.path.join(LIB, 'django_1_2')) sys.path.append(os.path.join(LIB, 'fancy_urllib')) sys.path.append(os.path.join(LIB, 'simplejson')) sys.path.append(os.path.join(LIB, 'webob')) sys.path.append(os.path.join(LIB, 'yaml', 'lib')) sys.path.append(ROOT) from google.appengine.ext.remote_api import remote_api_stub import yaml def default_auth_func(): user = os.environ.get('EMAIL_ADDRESS') if user: print('User: %s' % user) else: user = raw_input('Username:') return user, getpass.getpass('Password:') def smart_auth_func(): """Try to guess first.""" try: return os.environ['EMAIL_ADDRESS'], open('.pwd').readline().strip() except (KeyError, IOError): return default_auth_func() def default_app_id(directory): return yaml.load(open(os.path.join(directory, 'app.yaml')))['application'] def setup_env(app_id, host=None, auth_func=None): """Setup remote access to a GAE instance.""" auth_func = auth_func or smart_auth_func host = host or '%s.appspot.com' % app_id # pylint: disable=W0612 from google.appengine.api import memcache from google.appengine.api.users import User from google.appengine.ext import db 
remote_api_stub.ConfigureRemoteDatastore( app_id, '/_ah/remote_api', auth_func, host) # Initialize environment. os.environ['SERVER_SOFTWARE'] = '' import appengine_config # Create shortcuts. import codereview from codereview import models, views # Symbols presented to the user. predefined_vars = locals().copy() del predefined_vars['appengine_config'] del predefined_vars['auth_func'] # Load all the models. for i in dir(models): if re.match(r'[A-Z][a-z]', i[:2]): predefined_vars[i] = getattr(models, i) return predefined_vars def main(): parser = optparse.OptionParser() parser.add_option('-v', '--verbose', action='count') options, args = parser.parse_args() if not args: app_id = default_app_id(ROOT) else: app_id = args[0] host = None if len(args) > 1: host = args[1] if options.verbose: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.ERROR) predefined_vars = setup_env(app_id, host) prompt = ( 'App Engine interactive console for "%s".\n' 'Available symbols:\n' ' %s\n') % (app_id, ', '.join(sorted(predefined_vars))) code.interact(prompt, None, predefined_vars) if __name__ == '__main__': sys.exit(main())
[ [ 1, 0, 0.1345, 0.0084, 0, 0.66, 0, 44, 0, 1, 0, 0, 44, 0, 0 ], [ 1, 0, 0.1429, 0.0084, 0, 0.66, 0.0435, 784, 0, 1, 0, 0, 784, 0, 0 ], [ 1, 0, 0.1513, 0.0084, 0, 0....
[ "import code", "import getpass", "import logging", "import optparse", "import os", "import re", "import sys", "ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')", "LIB = os.path.join(ROOT, '..', 'google_appengine', 'lib')", "sys.path.insert(0, os.path.join(ROOT, '..', 'google_a...
# Copyright 2008-2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Top-level URL mappings for Rietveld.""" # NOTE: Must import *, since Django looks for things here, e.g. handler500. from django.conf.urls.defaults import * # If you don't want to run Rietveld from the root level, add the # subdirectory as shown in the following example: # # url(r'subpath/', include('codereview.urls')), # urlpatterns = patterns( '', url(r'', include('codereview.urls')), )
[ [ 8, 0, 0.5357, 0.0357, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.6429, 0.0357, 0, 0.66, 0.5, 341, 0, 1, 0, 0, 341, 0, 0 ], [ 14, 0, 0.9464, 0.1429, 0, 0.66, ...
[ "\"\"\"Top-level URL mappings for Rietveld.\"\"\"", "from django.conf.urls.defaults import *", "urlpatterns = patterns(\n '',\n url(r'', include('codereview.urls')),\n )" ]
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Minimal Django settings.""" import os from google.appengine.api import app_identity # Banner for e.g. planned downtime announcements ## SPECIAL_BANNER = """\ ## Rietveld will be down for maintenance on ## Thursday November 17 ## from ## <a href="http://www.timeanddate.com/worldclock/fixedtime.html?iso=20111117T17&ah=6"> ## 17:00 - 23:00 UTC ## </a> ## """ APPEND_SLASH = False DEBUG = os.environ['SERVER_SOFTWARE'].startswith('Dev') INSTALLED_APPS = ( 'codereview', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.middleware.http.ConditionalGetMiddleware', 'codereview.middleware.AddUserToRequestMiddleware', 'codereview.middleware.PropagateExceptionMiddleware', ) ROOT_URLCONF = 'urls' TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.request', ) TEMPLATE_DEBUG = DEBUG TEMPLATE_DIRS = ( os.path.join(os.path.dirname(__file__), 'templates'), ) TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.load_template_source', ) FILE_UPLOAD_HANDLERS = ( 'django.core.files.uploadhandler.MemoryFileUploadHandler', ) FILE_UPLOAD_MAX_MEMORY_SIZE = 1048576 # 1 MB MEDIA_URL = '/static/' appid = app_identity.get_application_id() RIETVELD_INCOMING_MAIL_ADDRESS = ('reply@%s.appspotmail.com' % appid) RIETVELD_INCOMING_MAIL_MAX_SIZE = 500 * 1024 # 500K RIETVELD_REVISION = '<unknown>' try: RIETVELD_REVISION = open( os.path.join(os.path.dirname(__file__), 'REVISION') ).read() except: 
pass UPLOAD_PY_SOURCE = os.path.join(os.path.dirname(__file__), 'upload.py') # Default values for patch rendering DEFAULT_CONTEXT = 10 DEFAULT_COLUMN_WIDTH = 80 MIN_COLUMN_WIDTH = 3 MAX_COLUMN_WIDTH = 2000
[ [ 8, 0, 0.1948, 0.013, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2208, 0.013, 0, 0.66, 0.0417, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.2468, 0.013, 0, 0.66, ...
[ "\"\"\"Minimal Django settings.\"\"\"", "import os", "from google.appengine.api import app_identity", "APPEND_SLASH = False", "DEBUG = os.environ['SERVER_SOFTWARE'].startswith('Dev')", "INSTALLED_APPS = (\n 'codereview',\n)", "MIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n...
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import md5 from django.contrib.syndication.feeds import Feed from django.core.exceptions import ObjectDoesNotExist from django.core.urlresolvers import reverse from django.utils.feedgenerator import Atom1Feed from codereview import library from codereview import models class BaseFeed(Feed): title = 'Code Review' description = 'Rietveld: Code Review Tool hosted on Google App Engine' feed_type = Atom1Feed def link(self): return reverse('codereview.views.index') def author_name(self): return 'rietveld' def item_guid(self, item): return 'urn:md5:%s' % (md5.new(str(item.key())).hexdigest()) def item_link(self, item): if isinstance(item, models.PatchSet): if item.data is not None: return reverse('codereview.views.download', args=[item.issue.key().id(),item.key().id()]) else: # Patch set is too large, only the splitted diffs are available. 
return reverse('codereview.views.show', args=[item.parent_key().id()]) if isinstance(item, models.Message): return '%s#msg-%s' % (reverse('codereview.views.show', args=[item.issue.key().id()]), item.key()) return reverse('codereview.views.show', args=[item.key().id()]) def item_title(self, item): return 'the title' def item_author_name(self, item): if isinstance(item, models.Issue): return library.get_nickname(item.owner, True) if isinstance(item, models.PatchSet): return library.get_nickname(item.issue.owner, True) if isinstance(item, models.Message): return library.get_nickname(item.sender, True) return 'Rietveld' def item_pubdate(self, item): if isinstance(item, models.Issue): return item.modified if isinstance(item, models.PatchSet): # Use created, not modified, so that commenting on # a patch set does not bump its place in the RSS feed. return item.created if isinstance(item, models.Message): return item.date return None class BaseUserFeed(BaseFeed): def get_object(self, bits): """Returns the account for the requested user feed. bits is a list of URL path elements. The first element of this list should be the user's nickname. A 404 is raised if the list is empty or has more than one element or if the a user with that nickname doesn't exist. 
""" if len(bits) != 1: raise ObjectDoesNotExist obj = bits[0] account = models.Account.get_account_for_nickname('%s' % obj) if account is None: raise ObjectDoesNotExist return account class ReviewsFeed(BaseUserFeed): title = 'Code Review - All issues I have to review' def items(self, obj): return _rss_helper(obj.email, 'closed = FALSE AND reviewers = :1', use_email=True) class ClosedFeed(BaseUserFeed): title = "Code Review - Reviews closed by me" def items(self, obj): return _rss_helper(obj.email, 'closed = TRUE AND owner = :1') class MineFeed(BaseUserFeed): title = 'Code Review - My issues' def items(self, obj): return _rss_helper(obj.email, 'closed = FALSE AND owner = :1') class AllFeed(BaseFeed): title = 'Code Review - All issues' def items(self): query = models.Issue.gql('WHERE closed = FALSE AND private = FALSE ' 'ORDER BY modified DESC') return query.fetch(RSS_LIMIT) class OneIssueFeed(BaseFeed): def link(self): return reverse('codereview.views.index') def get_object(self, bits): if len(bits) != 1: raise ObjectDoesNotExist obj = models.Issue.get_by_id(int(bits[0])) if obj: return obj raise ObjectDoesNotExist def title(self, obj): return 'Code review - Issue %d: %s' % (obj.key().id(), obj.subject) def items(self, obj): all = list(obj.patchset_set) + list(obj.message_set) all.sort(key=self.item_pubdate) return all ### RSS feeds ### # Maximum number of issues reported by RSS feeds RSS_LIMIT = 20 def _rss_helper(email, query_string, use_email=False): account = models.Account.get_account_for_email(email) if account is None: issues = [] else: query = models.Issue.gql('WHERE %s AND private = FALSE ' 'ORDER BY modified DESC' % query_string, use_email and account.email or account.user) issues = query.fetch(RSS_LIMIT) return issues
[ [ 1, 0, 0.092, 0.0061, 0, 0.66, 0, 604, 0, 1, 0, 0, 604, 0, 0 ], [ 1, 0, 0.1043, 0.0061, 0, 0.66, 0.0667, 211, 0, 1, 0, 0, 211, 0, 0 ], [ 1, 0, 0.1104, 0.0061, 0, 0...
[ "import md5", "from django.contrib.syndication.feeds import Feed", "from django.core.exceptions import ObjectDoesNotExist", "from django.core.urlresolvers import reverse", "from django.utils.feedgenerator import Atom1Feed", "from codereview import library", "from codereview import models", "class Base...
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Intra-region diff utilities. Intra-region diff highlights the blocks of code which have been changed or deleted within a region. So instead of highlighting the whole region marked as changed, the user can see what exactly was changed within that region. Terminology: 'region' is a list of consecutive code lines. 'word' is the unit of intra-region diff. Its definition is arbitrary based on what we think as to be a good unit of difference between two regions. 'block' is a small section of code within a region. It can span multiple lines. There can be multiple non overlapping blocks within a region. A block can potentially span the whole region. The blocks have two representations. One is of the format (offset1, offset2, size) which is returned by the SequenceMatcher to indicate a match of length 'size' starting at offset1 in the first/old line and starting at offset2 in the second/new line. We convert this representation to a pair of tuples i.e. (offset1, size) and (offset2, size) for rendering each side of the diff separately. This latter representation is also more efficient for doing compaction of adjacent blocks which reduces the size of the HTML markup. See CompactBlocks for more details. SequenceMatcher always returns one special matching block at the end with contents (len(line1), len(line2), 0). We retain this special block as it simplifies for loops in rendering the last non-matching block. 
All functions which deal with the sequence of blocks assume presence of the special block at the end of the sequence and retain it. """ import cgi import difflib import re # Tag to begin a diff chunk. BEGIN_TAG = "<span class=\"%s\">" # Tag to end a diff block. END_TAG = "</span>" # Tag used for visual tab indication. TAB_TAG = "<span class=\"visualtab\">&raquo;</span>" # Color scheme to govern the display properties of diff blocks and matching # blocks. Each value e.g. 'oldlight' corresponds to a CSS style. COLOR_SCHEME = { 'old': { 'match': 'oldlight', 'diff': 'olddark', 'bckgrnd': 'oldlight', }, 'new': { 'match': 'newlight', 'diff': 'newdark', 'bckgrnd': 'newlight', }, 'oldmove': { 'match': 'movelight', 'diff': 'oldmovedark', 'bckgrnd': 'movelight' }, 'newmove': { 'match': 'newlight', 'diff': 'newdark', 'bckgrnd': 'newlight' }, } # Regular expressions to tokenize lines. Default is 'd'. EXPRS = { 'a': r'(\w+|[^\w\s]+|\s+)', 'b': r'([A-Za-z0-9]+|[^A-Za-z0-9])', 'c': r'([A-Za-z0-9_]+|[^A-Za-z0-9_])', 'd': r'([^\W_]+|[\W_])', } # Maximum total characters in old and new lines for doing intra-region diffs. # Intra-region diff for larger regions is hard to comprehend and wastes CPU # time. MAX_TOTAL_LEN = 10000 def _ExpandTabs(text, column, tabsize, mark_tabs=False): """Expand tab characters in a string into spaces. Args: text: a string containing tab characters. column: the initial column for the first character in text tabsize: tab stops occur at columns that are multiples of tabsize mark_tabs: if true, leave a tab character as the first character of the expansion, so that the caller can find where the tabs were. Note that calling _ExpandTabs with mark_tabs=True is not idempotent. 
""" expanded = "" while True: tabpos = text.find("\t") if tabpos < 0: break fillwidth = tabsize - (tabpos + column) % tabsize column += tabpos + fillwidth if mark_tabs: fill = "\t" + " " * (fillwidth - 1) else: fill = " " * fillwidth expanded += text[0:tabpos] + fill text = text[tabpos+1:] return expanded + text def Break(text, offset=0, limit=80, brk="\n ", tabsize=8, mark_tabs=False): """Break text into lines. Break text, which begins at column offset, each time it reaches column limit. To break the text, insert brk, which does not count toward the column count of the next line and is assumed to be valid HTML. During the text breaking process, replaces tabs with spaces up to the next column that is a multiple of tabsize. If mark_tabs is true, replace the first space of each expanded tab with TAB_TAG. Input and output are assumed to be in UTF-8; the computation is done in Unicode. (Still not good enough if zero-width characters are present.) If the input is not valid UTF-8, then the encoding is passed through, potentially breaking up multi-byte characters. We pass the line through cgi.escape before returning it. A trailing newline is always stripped from the input first. """ assert tabsize > 0, tabsize if text.endswith("\n"): text = text[:-1] try: text = unicode(text, "utf-8") except: pass # Expand all tabs. # If mark_tabs is true, we retain one \t character as a marker during # expansion so that we later replace it with an HTML snippet. text = _ExpandTabs(text, offset, tabsize, mark_tabs) # Perform wrapping. if len(text) > limit - offset: parts, text = [text[0:limit-offset]], text[limit-offset:] while len(text) > limit: parts.append(text[0:limit]) text = text[limit:] parts.append(text) text = brk.join([cgi.escape(p) for p in parts]) else: text = cgi.escape(text) # Colorize tab markers text = text.replace("\t", TAB_TAG) if isinstance(text, unicode): return text.encode("utf-8", "replace") return text def CompactBlocks(blocks): """Compacts adjacent code blocks. 
In many cases 2 adjacent blocks can be merged into one. This allows to do some further processing on those blocks. Args: blocks: [(offset1, size), ...] Returns: A list with the same structure as the input with adjacent blocks merged. However, the last block (which is always assumed to have a zero size) is never merged. For example, the input [(0, 2), (2, 8), (10, 5), (15, 0)] will produce the output [(0, 15), (15, 0)]. """ if len(blocks) == 1: return blocks result = [blocks[0]] for block in blocks[1:-1]: last_start, last_len = result[-1] curr_start, curr_len = block if last_start + last_len == curr_start: result[-1] = last_start, last_len + curr_len else: result.append(block) result.append(blocks[-1]) return result def FilterBlocks(blocks, filter_func): """Gets rid of any blocks if filter_func evaluates false for them. Args: blocks: [(offset1, offset2, size), ...]; must have at least 1 entry filter_func: a boolean function taking a single argument of the form (offset1, offset2, size) Returns: A list with the same structure with entries for which filter_func() returns false removed. However, the last block is always included. """ # We retain the 'special' block at the end. res = [b for b in blocks[:-1] if filter_func(b)] res.append(blocks[-1]) return res def GetDiffParams(expr='d', min_match_ratio=0.6, min_match_size=2, dbg=False): """Returns a tuple of various parameters which affect intra region diffs. Args: expr: regular expression id to use to identify 'words' in the intra region diff min_match_ratio: minimum similarity between regions to qualify for intra region diff min_match_size: the smallest matching block size to use. Blocks smaller than this are ignored. dbg: to turn on generation of debugging information for the diff Returns: 4 tuple (expr, min_match_ratio, min_match_size, dbg) that can be used to customize diff. It can be passed to functions like WordDiff and IntraLineDiff. 
""" assert expr in EXPRS assert min_match_size in xrange(1, 5) assert min_match_ratio > 0.0 and min_match_ratio < 1.0 return (expr, min_match_ratio, min_match_size, dbg) def CanDoIRDiff(old_lines, new_lines): """Tells if it would be worth computing the intra region diff. Calculating IR diff is costly and is usually helpful only for small regions. We use a heuristic that if the total number of characters is more than a certain threshold then we assume it is not worth computing the IR diff. Args: old_lines: an array of strings containing old text new_lines: an array of strings containing new text Returns: True if we think it is worth computing IR diff for the region defined by old_lines and new_lines, False otherwise. TODO: Let GetDiffParams handle MAX_TOTAL_LEN param also. """ total_chars = (sum(len(line) for line in old_lines) + sum(len(line) for line in new_lines)) return total_chars <= MAX_TOTAL_LEN def WordDiff(line1, line2, diff_params): """Returns blocks with positions indiciating word level diffs. Args: line1: string representing the left part of the diff line2: string representing the right part of the diff diff_params: return value of GetDiffParams Returns: A tuple (blocks, ratio) where: blocks: [(offset1, offset2, size), ...] such that line1[offset1:offset1+size] == line2[offset2:offset2+size] and the last block is always (len(line1), len(line2), 0) ratio: a float giving the diff ratio computed by SequenceMatcher. """ match_expr, min_match_ratio, min_match_size, _ = diff_params exp = EXPRS[match_expr] # Strings may have been left undecoded up to now. Assume UTF-8. try: line1 = unicode(line1, "utf8") except: pass try: line2 = unicode(line2, "utf8") except: pass a = re.findall(exp, line1, re.U) b = re.findall(exp, line2, re.U) s = difflib.SequenceMatcher(None, a, b) matching_blocks = s.get_matching_blocks() ratio = s.ratio() # Don't show intra region diffs if both lines are too different and there is # more than one block of difference. 
If there is only one change then we # still show the intra region diff regardless of how different the blocks # are. # Note: We compare len(matching_blocks) with 3 because one block of change # results in 2 matching blocks. We add the one special block and we get 3 # matching blocks per one block of change. if ratio < min_match_ratio and len(matching_blocks) > 3: return ([(0, 0, 0)], ratio) # For now convert to character level blocks because we already have # the code to deal with folding across lines for character blocks. # Create arrays lena an lenb which have cumulative word lengths # corresponding to word positions in a and b lena = [] last = 0 for w in a: lena.append(last) last += len(w) lenb = [] last = 0 for w in b: lenb.append(last) last += len(w) lena.append(len(line1)) lenb.append(len(line2)) # Convert to character blocks blocks = [] for s1, s2, blen in matching_blocks[:-1]: apos = lena[s1] bpos = lenb[s2] block_len = lena[s1+blen] - apos blocks.append((apos, bpos, block_len)) # Recreate the special block. blocks.append((len(line1), len(line2), 0)) # Filter any matching blocks which are smaller than the desired threshold. # We don't remove matching blocks with only a newline character as doing so # results in showing the matching newline character as non matching which # doesn't look good. blocks = FilterBlocks(blocks, lambda b: (b[2] >= min_match_size or line1[b[0]:b[0]+b[2]] == '\n')) return (blocks, ratio) def IntraLineDiff(line1, line2, diff_params, diff_func=WordDiff): """Computes intraline diff blocks. Args: line1: string representing the left part of the diff line2: string representing the right part of the diff diff_params: return value of GetDiffParams diff_func: a function whose signature matches that of WordDiff() above Returns: A tuple of (blocks1, blocks2) corresponding to line1 and line2. Each element of the tuple is an array of (start_pos, length) tuples denoting a diff block. 
""" blocks, ratio = diff_func(line1, line2, diff_params) blocks1 = [(start1, length) for (start1, start2, length) in blocks] blocks2 = [(start2, length) for (start1, start2, length) in blocks] return (blocks1, blocks2, ratio) def DumpDiff(blocks, line1, line2): """Helper function to debug diff related problems. Args: blocks: [(offset1, offset2, size), ...] line1: string representing the left part of the diff line2: string representing the right part of the diff """ for offset1, offset2, size in blocks: print offset1, offset2, size print offset1, size, ": ", line1[offset1:offset1+size] print offset2, size, ": ", line2[offset2:offset2+size] def RenderIntraLineDiff(blocks, line, tag, dbg_info=None, limit=80, indent=5, tabsize=8, mark_tabs=False): """Renders the diff blocks returned by IntraLineDiff function. Args: blocks: [(start_pos, size), ...] line: line of code on which the blocks are to be rendered. tag: 'new' or 'old' to control the color scheme. dbg_info: a string that holds debugging informaion header. Debug information is rendered only if dbg_info is not None. limit: folding limit to be passed to the Break function. indent: indentation size to be passed to the Break function. tabsize: tab stops occur at columns that are multiples of tabsize mark_tabs: if True, mark the first character of each expanded tab visually Returns: A tuple of two elements. First element is the rendered version of the input 'line'. Second element tells if the line has a matching newline character. 
""" res = "" prev_start, prev_len = 0, 0 has_newline = False debug_info = dbg_info if dbg_info: debug_info += "\nBlock Count: %d\nBlocks: " % (len(blocks) - 1) for curr_start, curr_len in blocks: if dbg_info and curr_len > 0: debug_info += Break( "\n(%d, %d):|%s|" % (curr_start, curr_len, line[curr_start:curr_start+curr_len]), limit, indent, tabsize, mark_tabs) res += FoldBlock(line, prev_start + prev_len, curr_start, limit, indent, tag, 'diff', tabsize, mark_tabs) res += FoldBlock(line, curr_start, curr_start + curr_len, limit, indent, tag, 'match', tabsize, mark_tabs) # TODO: This test should be out of loop rather than inside. Once we # filter out some junk from blocks (e.g. some empty blocks) we should do # this test only on the last matching block. if line[curr_start:curr_start+curr_len].endswith('\n'): has_newline = True prev_start, prev_len = curr_start, curr_len return (res, has_newline, debug_info) def FoldBlock(src, start, end, limit, indent, tag, btype, tabsize=8, mark_tabs=False): """Folds and renders a block. Args: src: line of code start: starting position of the block within 'src'. end: ending position of the block within 'src'. limit: folding limit indent: indentation to use for folding. tag: 'new' or 'old' to control the color scheme. btype: block type i.e. 'match' or 'diff' to control the color schme. tabsize: tab stops occur at columns that are multiples of tabsize mark_tabs: if True, mark the first character of each expanded tab visually Returns: A string representing the rendered block. """ text = src[start:end] # We ignore newlines because we do newline management ourselves. # Any other new lines with at the end will be stripped off by the Break # method. if start >= end or text == '\n': return "" fbegin, lend, nl_plus_indent = GetTags(tag, btype, indent) # 'bol' is beginning of line. # The text we care about begins at byte offset start # but if there are tabs it will have a larger column # offset. 
Use len(_ExpandTabs()) to find out how many # columns the starting prefix occupies. offset_from_bol = len(_ExpandTabs(src[0:start], 0, tabsize)) % limit brk = lend + nl_plus_indent + fbegin text = Break(text, offset_from_bol, limit, brk, tabsize, mark_tabs) if text: text = fbegin + text + lend # If this is the first block of the line and this is not the first line then # insert newline + indent. if offset_from_bol == 0 and not start == 0: text = nl_plus_indent + text return text def GetTags(tag, btype, indent): """Returns various tags for rendering diff blocks. Args: tag: a key from COLOR_SCHEME btype: 'match' or 'diff' indent: indentation to use Returns A 3 tuple (begin_tag, end_tag, formatted_indent_block) """ assert tag in COLOR_SCHEME assert btype in ['match', 'diff'] fbegin = BEGIN_TAG % COLOR_SCHEME[tag][btype] bbegin = BEGIN_TAG % COLOR_SCHEME[tag]['bckgrnd'] lend = END_TAG nl_plus_indent = '\n' if indent > 0: nl_plus_indent += bbegin + cgi.escape(" "*indent) + lend return fbegin, lend, nl_plus_indent def ConvertToSingleLine(lines): """Transforms a sequence of strings into a single line. Returns the state that can be used to reconstruct the original lines with the newline separators placed at the original place. Args: lines: sequence of strings Returns: Returns (single_line, state) tuple. 'state' shouldn't be modified by the caller. It is only used to pass to other functions which will do certain operations on this state. 'state' is an array containing a dictionary for each item in lines. Each dictionary has two elements 'pos' and 'blocks'. 'pos' is the end position of each line in the final converted string. 'blocks' is an array of blocks for each line of code. These blocks are added using MarkBlock function. """ state = [] total_length = 0 for l in lines: total_length += len(l) # TODO: Use a tuple instead. 
state.append({'pos': total_length, # the line split point 'blocks': [], # blocks which belong to this line }) result = "".join(lines) assert len(state) == len(lines) return (result, state) def MarkBlock(state, begin, end): """Marks a block on a region such that it doesn't cross line boundaries. It is an operation that can be performed on the single line which was returned by the ConvertToSingleLine function. This operation marks arbitrary block [begin,end) on the text. It also ensures that if [begin,end) crosses line boundaries in the original region then it splits the section up in 2 or more blocks such that no block crosses the boundaries. Args: state: the state returned by ConvertToSingleLine function. The state contained is modified by this function. begin: Beginning of the block. end: End of the block (exclusive). Returns: None. """ # TODO: Make sure already existing blocks don't overlap if begin == end: return last_pos = 0 for entry in state: pos = entry['pos'] if begin >= last_pos and begin < pos: if end < pos: # block doesn't cross any line boundary entry['blocks'].append((begin, end)) else: # block crosses the line boundary entry['blocks'].append((begin, pos)) MarkBlock(state, pos, end) break last_pos = pos def GetBlocks(state): """Returns all the blocks corresponding to the lines in the region. Args: state: the state returned by ConvertToSingleLine(). Returns: An array of [(start_pos, length), ..] with an entry for each line in the region. """ result = [] last_pos = 0 for entry in state: pos = entry['pos'] # Calculate block start points from the beginning of individual lines. blocks = [(s[0]-last_pos, s[1]-s[0]) for s in entry['blocks']] # Add one end marker block. blocks.append((pos-last_pos, 0)) result.append(blocks) last_pos = pos return result def IntraRegionDiff(old_lines, new_lines, diff_params): """Computes intra region diff. 
Args: old_lines: array of strings new_lines: array of strings diff_params: return value of GetDiffParams Returns: A tuple (old_blocks, new_blocks) containing matching blocks for old and new lines. """ old_line, old_state = ConvertToSingleLine(old_lines) new_line, new_state = ConvertToSingleLine(new_lines) old_blocks, new_blocks, ratio = IntraLineDiff(old_line, new_line, diff_params) for begin, length in old_blocks: MarkBlock(old_state, begin, begin+length) old_blocks = GetBlocks(old_state) for begin, length in new_blocks: MarkBlock(new_state, begin, begin+length) new_blocks = GetBlocks(new_state) return (old_blocks, new_blocks, ratio) def NormalizeBlocks(blocks, line): """Normalizes block representation of an intra line diff. One diff can have multiple representations. Some times the diff returned by the difflib for similar text sections is different even within same region. For example if 2 already indented lines were indented with one additional space character, the difflib may return the non matching space character to be any of the already existing spaces. So one line may show non matching space character as the first space character and the second line may show it to be the last space character. This is sometimes confusing. This is the side effect of the new regular expression we are using in WordDiff for identifying indvidual words. This regular expression ('b') treats a sequence of punctuation and whitespace characters as individual characters. It has some visual advantages for showing a character level punctuation change as one character change rather than a group of character change. Making the normalization too generic can have performance implications. So this implementation of normalize blocks intends to handle only one case. Let's say S represents the space character and () marks a matching block. 
Then the normalize operation will do following: SSSS(SS)(ABCD) => SSSS(SS)(ABCD) (SS)SSSS(ABCD) => SSSS(SS)(ABCD) (SSSS)SS(ABCD) => SS(SSSS)(ABCD) and so on.. Args: blocks: An array of (offset, len) tuples defined on 'line'. These blocks mark the matching areas. Anything between these matching blocks is considered non-matching. line: The text string on which the blocks are defined. Returns: An array of (offset, len) tuples representing the same diff but in normalized form. """ result = [] prev_start, prev_len = blocks[0] for curr_start, curr_len in blocks[1:]: # Note: nm_ is a prefix for non matching and m_ is a prefix for matching. m_len, nm_len = prev_len, curr_start - (prev_start+prev_len) # This if condition checks if matching and non matching parts are greater # than zero length and are comprised of spaces ONLY. The last condition # deals with most of the observed cases of strange diffs. # Note: curr_start - prev_start == m_l + nm_l # So line[prev_start:curr_start] == matching_part + non_matching_part. text = line[prev_start:curr_start] if m_len > 0 and nm_len > 0 and text == ' ' * len(text): # Move the matching block towards the end i.e. normalize. result.append((prev_start + nm_len, m_len)) else: # Keep the existing matching block. result.append((prev_start, prev_len)) prev_start, prev_len = curr_start, curr_len result.append(blocks[-1]) assert len(result) == len(blocks) return result def RenderIntraRegionDiff(lines, diff_blocks, tag, ratio, limit=80, indent=5, tabsize=8, mark_tabs=False, dbg=False): """Renders intra region diff for one side. 
Args: lines: list of strings representing source code in the region diff_blocks: blocks that were returned for this region by IntraRegionDiff() tag: 'new' or 'old' ratio: similarity ratio returned by the diff computing function limit: folding limit indent: indentation size tabsize: tab stops occur at columns that are multiples of tabsize mark_tabs: if True, mark the first character of each expanded tab visually dbg: indicates if debug information should be rendered Returns: A list of strings representing the rendered version of each item in input 'lines'. """ result = [] dbg_info = None if dbg: dbg_info = 'Ratio: %.1f' % ratio for line, blocks in zip(lines, diff_blocks): blocks = NormalizeBlocks(blocks, line) blocks = CompactBlocks(blocks) diff = RenderIntraLineDiff(blocks, line, tag, dbg_info=dbg_info, limit=limit, indent=indent, tabsize=tabsize, mark_tabs=mark_tabs) result.append(diff) assert len(result) == len(lines) return result
[ [ 8, 0, 0.0417, 0.0417, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0647, 0.0014, 0, 0.66, 0.037, 934, 0, 1, 0, 0, 934, 0, 0 ], [ 1, 0, 0.0661, 0.0014, 0, 0.66,...
[ "\"\"\"Intra-region diff utilities.\n\nIntra-region diff highlights the blocks of code which have been changed or\ndeleted within a region. So instead of highlighting the whole region marked as\nchanged, the user can see what exactly was changed within that region.\n\nTerminology:\n 'region' is a list of consecuti...
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility to read and apply a unified diff without forking patch(1). For a discussion of the unified diff format, see my blog on Artima: http://www.artima.com/weblogs/viewpost.jsp?thread=164293 """ import difflib import logging import re import sys _CHUNK_RE = re.compile(r""" @@ \s+ - (?: (\d+) (?: , (\d+) )?) \s+ \+ (?: (\d+) (?: , (\d+) )?) \s+ @@ """, re.VERBOSE) def PatchLines(old_lines, patch_lines, name="<patch>"): """Patches the old_lines with patches read from patch_lines. This only reads unified diffs. The header lines are ignored. Yields (tag, old, new) tuples where old and new are lists of lines. The tag can either start with "error" or be a tag from difflib: "equal", "insert", "delete", "replace". After "error" is yielded, no more tuples are yielded. It is possible that consecutive "equal" tuples are yielded. """ chunks = ParsePatchToChunks(patch_lines, name) if chunks is None: return iter([("error: ParsePatchToChunks failed", [], [])]) return PatchChunks(old_lines, chunks) def PatchChunks(old_lines, chunks): """Patche old_lines with chunks. Yields (tag, old, new) tuples where old and new are lists of lines. The tag can either start with "error" or be a tag from difflib: "equal", "insert", "delete", "replace". After "error" is yielded, no more tuples are yielded. It is possible that consecutive "equal" tuples are yielded. 
""" if not chunks: # The patch is a no-op yield ("equal", old_lines, old_lines) return old_pos = 0 for (old_i, old_j), _, old_chunk, new_chunk in chunks: eq = old_lines[old_pos:old_i] if eq: yield "equal", eq, eq old_pos = old_i # Check that the patch matches the target file if old_lines[old_i:old_j] != old_chunk: logging.warn("mismatch:%s.%s.", old_lines[old_i:old_j], old_chunk) yield ("error: old chunk mismatch", old_lines[old_i:old_j], old_chunk) return # TODO(guido): ParsePatch knows the diff details, but throws the info away sm = difflib.SequenceMatcher(None, old_chunk, new_chunk) for tag, i1, i2, j1, j2 in sm.get_opcodes(): yield tag, old_chunk[i1:i2], new_chunk[j1:j2] old_pos = old_j # Copy the final matching chunk if any. eq = old_lines[old_pos:] if eq: yield ("equal", eq, eq) def ParseRevision(lines): """Parse the revision number out of the raw lines of the patch. Returns 0 (new file) if no revision number was found. """ for line in lines[:10]: if line.startswith('@'): break m = re.match(r'---\s.*\(.*\s(\d+)\)\s*$', line) if m: return int(m.group(1)) return 0 _NO_NEWLINE_MESSAGE = "\\ No newline at end of file" def ParsePatchToChunks(lines, name="<patch>"): """Parses a patch from a list of lines. Return a list of chunks, where each chunk is a tuple: old_range, new_range, old_lines, new_lines Returns a list of chunks (possibly empty); or None if there's a problem. 
""" lineno = 0 raw_chunk = [] chunks = [] old_range = new_range = None old_last = new_last = 0 in_prelude = True for line in lines: lineno += 1 if in_prelude: # Skip leading lines until after we've seen one starting with '+++' if line.startswith("+++"): in_prelude = False continue match = _CHUNK_RE.match(line) if match: if raw_chunk: # Process the lines in the previous chunk old_chunk = [] new_chunk = [] for tag, rest in raw_chunk: if tag in (" ", "-"): old_chunk.append(rest) if tag in (" ", "+"): new_chunk.append(rest) # Check consistency old_i, old_j = old_range new_i, new_j = new_range if len(old_chunk) != old_j - old_i or len(new_chunk) != new_j - new_i: logging.warn("%s:%s: previous chunk has incorrect length", name, lineno) return None chunks.append((old_range, new_range, old_chunk, new_chunk)) raw_chunk = [] # Parse the @@ header old_ln, old_n, new_ln, new_n = match.groups() old_ln, old_n, new_ln, new_n = map(long, (old_ln, old_n or 1, new_ln, new_n or 1)) # Convert the numbers to list indices we can use if old_n == 0: old_i = old_ln else: old_i = old_ln - 1 old_j = old_i + old_n old_range = old_i, old_j if new_n == 0: new_i = new_ln else: new_i = new_ln - 1 new_j = new_i + new_n new_range = new_i, new_j # Check header consistency with previous header if old_i < old_last or new_i < new_last: logging.warn("%s:%s: chunk header out of order: %r", name, lineno, line) return None if old_i - old_last != new_i - new_last: logging.warn("%s:%s: inconsistent chunk header: %r", name, lineno, line) return None old_last = old_j new_last = new_j else: tag, rest = line[0], line[1:] if tag in (" ", "-", "+"): raw_chunk.append((tag, rest)) elif line.startswith(_NO_NEWLINE_MESSAGE): # TODO(guido): need to check that no more lines follow for this file if raw_chunk: last_tag, last_rest = raw_chunk[-1] if last_rest.endswith("\n"): raw_chunk[-1] = (last_tag, last_rest[:-1]) else: # Only log if it's a non-blank line. Blank lines we see a lot. 
if line and line.strip(): logging.warn("%s:%d: indecypherable input: %r", name, lineno, line) if chunks or raw_chunk: break # Trailing garbage isn't so bad return None if raw_chunk: # Process the lines in the last chunk old_chunk = [] new_chunk = [] for tag, rest in raw_chunk: if tag in (" ", "-"): old_chunk.append(rest) if tag in (" ", "+"): new_chunk.append(rest) # Check consistency old_i, old_j = old_range new_i, new_j = new_range if len(old_chunk) != old_j - old_i or len(new_chunk) != new_j - new_i: print >> sys.stderr, ("%s:%s: last chunk has incorrect length" % (name, lineno)) return None chunks.append((old_range, new_range, old_chunk, new_chunk)) raw_chunk = [] return chunks def ParsePatchToLines(lines): """Parses a patch from a list of lines. Returns None on error, otherwise a list of 3-tuples: (old_line_no, new_line_no, line) A line number can be 0 if it doesn't exist in the old/new file. """ # TODO: can we share some of this code with ParsePatchToChunks? result = [] in_prelude = True for line in lines: if in_prelude: result.append((0, 0, line)) # Skip leading lines until after we've seen one starting with '+++' if line.startswith("+++"): in_prelude = False elif line.startswith("@"): result.append((0, 0, line)) match = _CHUNK_RE.match(line) if not match: logging.warn("ParsePatchToLines match failed on %s", line) return None old_ln = int(match.groups()[0]) new_ln = int(match.groups()[2]) else: if line[0] == "-": result.append((old_ln, 0, line)) old_ln += 1 elif line[0] == "+": result.append((0, new_ln, line)) new_ln += 1 elif line[0] == " ": result.append((old_ln, new_ln, line)) old_ln += 1 new_ln += 1 elif line.startswith(_NO_NEWLINE_MESSAGE): continue else: # Something else, could be property changes etc. result.append((0, 0, line)) return result
[ [ 8, 0, 0.0656, 0.0193, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0811, 0.0039, 0, 0.66, 0.0909, 866, 0, 1, 0, 0, 866, 0, 0 ], [ 1, 0, 0.0849, 0.0039, 0, 0.66...
[ "\"\"\"Utility to read and apply a unified diff without forking patch(1).\n\nFor a discussion of the unified diff format, see my blog on Artima:\nhttp://www.artima.com/weblogs/viewpost.jsp?thread=164293\n\"\"\"", "import difflib", "import logging", "import re", "import sys", "_CHUNK_RE = re.compile(r\"\"\...
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """URL mappings for the codereview package.""" # NOTE: Must import *, since Django looks for things here, e.g. handler500. from django.conf.urls.defaults import * import django.views.defaults from codereview import feeds urlpatterns = patterns( 'codereview.views', (r'^$', 'index'), (r'^all$', 'all'), (r'^mine$', 'mine'), (r'^starred$', 'starred'), (r'^new$', 'new'), (r'^upload$', 'upload'), (r'^(\d+)$', 'show', {}, 'show_bare_issue_number'), (r'^(\d+)/(show)?$', 'show'), (r'^(\d+)/add$', 'add'), (r'^(\d+)/edit$', 'edit'), (r'^(\d+)/delete$', 'delete'), (r'^(\d+)/close$', 'close'), (r'^(\d+)/mail$', 'mailissue'), (r'^(\d+)/publish$', 'publish'), (r'^download/issue(\d+)_(\d+)\.diff', 'download'), (r'^download/issue(\d+)_(\d+)_(\d+)\.diff', 'download_patch'), (r'^(\d+)/patch/(\d+)/(\d+)$', 'patch'), (r'^(\d+)/image/(\d+)/(\d+)/(\d+)$', 'image'), (r'^(\d+)/diff/(\d+)/(.+)$', 'diff'), (r'^(\d+)/diff2/(\d+):(\d+)/(.+)$', 'diff2'), (r'^(\d+)/diff_skipped_lines/(\d+)/(\d+)/(\d+)/(\d+)/([tba])/(\d+)$', 'diff_skipped_lines'), (r'^(\d+)/diff_skipped_lines/(\d+)/(\d+)/$', django.views.defaults.page_not_found, {}, 'diff_skipped_lines_prefix'), (r'^(\d+)/diff2_skipped_lines/(\d+):(\d+)/(\d+)/(\d+)/(\d+)/([tba])/(\d+)$', 'diff2_skipped_lines'), (r'^(\d+)/diff2_skipped_lines/(\d+):(\d+)/(\d+)/$', django.views.defaults.page_not_found, {}, 'diff2_skipped_lines_prefix'), (r'^(\d+)/upload_content/(\d+)/(\d+)$', 
'upload_content'), (r'^(\d+)/upload_patch/(\d+)$', 'upload_patch'), (r'^(\d+)/upload_complete/(\d+)?$', 'upload_complete'), (r'^(\d+)/description$', 'description'), (r'^(\d+)/fields', 'fields'), (r'^(\d+)/star$', 'star'), (r'^(\d+)/unstar$', 'unstar'), (r'^(\d+)/draft_message$', 'draft_message'), (r'^api/(\d+)/?$', 'api_issue'), (r'^api/(\d+)/(\d+)/?$', 'api_patchset'), (r'^user/(.+)$', 'show_user'), (r'^inline_draft$', 'inline_draft'), (r'^repos$', 'repos'), (r'^repo_new$', 'repo_new'), (r'^repo_init$', 'repo_init'), (r'^branch_new/(\d+)$', 'branch_new'), (r'^branch_edit/(\d+)$', 'branch_edit'), (r'^branch_delete/(\d+)$', 'branch_delete'), (r'^settings$', 'settings'), (r'^account_delete$', 'account_delete'), (r'^migrate_entities$', 'migrate_entities'), (r'^user_popup/(.+)$', 'user_popup'), (r'^(\d+)/patchset/(\d+)$', 'patchset'), (r'^(\d+)/patchset/(\d+)/delete$', 'delete_patchset'), (r'^account$', 'account'), (r'^use_uploadpy$', 'use_uploadpy'), (r'^_ah/xmpp/message/chat/', 'incoming_chat'), (r'^_ah/mail/(.*)', 'incoming_mail'), (r'^xsrf_token$', 'xsrf_token'), # patching upload.py on the fly (r'^static/upload.py$', 'customized_upload_py'), (r'^search$', 'search'), (r'^tasks/calculate_delta$', 'calculate_delta'), (r'^tasks/migrate_entities$', 'task_migrate_entities'), ) feed_dict = { 'reviews': feeds.ReviewsFeed, 'closed': feeds.ClosedFeed, 'mine' : feeds.MineFeed, 'all': feeds.AllFeed, 'issue' : feeds.OneIssueFeed, } urlpatterns += patterns( '', (r'^rss/(?P<url>.*)$', 'django.contrib.syndication.views.feed', {'feed_dict': feed_dict}), )
[ [ 8, 0, 0.1485, 0.0099, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1782, 0.0099, 0, 0.66, 0.2, 341, 0, 1, 0, 0, 341, 0, 0 ], [ 1, 0, 0.1881, 0.0099, 0, 0.66, ...
[ "\"\"\"URL mappings for the codereview package.\"\"\"", "from django.conf.urls.defaults import *", "import django.views.defaults", "from codereview import feeds", "urlpatterns = patterns(\n 'codereview.views',\n (r'^$', 'index'),\n (r'^all$', 'all'),\n (r'^mine$', 'mine'),\n (r'^starred$', 's...
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Custom middleware. Some of this may be generally useful.""" import logging from google.appengine.api import users from google.appengine.runtime import apiproxy_errors from google.appengine.runtime import DeadlineExceededError from django.conf import settings from django.http import Http404, HttpResponse from django.template import Context, loader from codereview import models class AddUserToRequestMiddleware(object): """Add a user object and a user_is_admin flag to each request.""" def process_request(self, request): request.user = users.get_current_user() request.user_is_admin = users.is_current_user_admin() # Update the cached value of the current user's Account account = None if request.user is not None: account = models.Account.get_account_for_user(request.user) models.Account.current_user_account = account class PropagateExceptionMiddleware(object): """Catch exceptions, log them and return a friendly error message. Disables itself in DEBUG mode. """ def _text_requested(self, request): """Returns True if a text/plain response is requested.""" # We could use a better heuristics that takes multiple # media_ranges and quality factors into account. For now we return # True iff 'text/plain' is the only media range the request # accepts. 
media_ranges = request.META.get('HTTP_ACCEPT', '').split(',') return len(media_ranges) == 1 and media_ranges[0] == 'text/plain' def process_exception(self, request, exception): if settings.DEBUG or isinstance(exception, Http404): return None if isinstance(exception, apiproxy_errors.CapabilityDisabledError): msg = ('Rietveld: App Engine is undergoing maintenance. ' 'Please try again in a while.') status = 503 elif isinstance(exception, (DeadlineExceededError, MemoryError)): msg = ('Rietveld is too hungry at the moment.' 'Please try again in a while.') status = 503 else: msg = 'Unhandled exception.' status = 500 logging.exception('%s: ' % exception.__class__.__name__) technical = '%s [%s]' % (exception, exception.__class__.__name__) if self._text_requested(request): content = '%s\n\n%s\n' % (msg, technical) content_type = 'text/plain' else: tpl = loader.get_template('exception.html') ctx = Context({'msg': msg, 'technical': technical}) content = tpl.render(ctx) content_type = 'text/html' return HttpResponse(content, status=status, content_type=content_type)
[ [ 8, 0, 0.1807, 0.012, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2048, 0.012, 0, 0.66, 0.1, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.2289, 0.012, 0, 0.66, ...
[ "\"\"\"Custom middleware. Some of this may be generally useful.\"\"\"", "import logging", "from google.appengine.api import users", "from google.appengine.runtime import apiproxy_errors", "from google.appengine.runtime import DeadlineExceededError", "from django.conf import settings", "from django.http...
# Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Collection of helper functions.""" import urlparse from google.appengine.ext import db from codereview.exceptions import FetchError def make_url(base, filename, rev): """Helper to construct the URL to fetch. Args: base: The base property of the Issue to which the Patch belongs. filename: The filename property of the Patch instance. rev: Revision number, or None for head revision. Returns: A URL referring to the given revision of the file. 
""" scheme, netloc, path, _, _, _ = urlparse.urlparse(base) if netloc.endswith(".googlecode.com"): # Handle Google code repositories if rev is None: raise FetchError("Can't access googlecode.com without a revision") if not path.startswith("/svn/"): raise FetchError( "Malformed googlecode.com URL (%s)" % base) path = path[5:] # Strip "/svn/" url = "%s://%s/svn-history/r%d/%s/%s" % (scheme, netloc, rev, path, filename) return url elif netloc.endswith("sourceforge.net") and rev is not None: if path.strip().endswith("/"): path = path.strip()[:-1] else: path = path.strip() splitted_path = path.split("/") url = "%s://%s/%s/!svn/bc/%d/%s/%s" % (scheme, netloc, "/".join(splitted_path[1:3]), rev, "/".join(splitted_path[3:]), filename) return url # Default for viewvc-based URLs (svn.python.org) url = base if not url.endswith('/'): url += '/' url += filename if rev is not None: url += '?rev=%s' % rev return url def to_dbtext(text): """Helper to turn a string into a db.Text instance. Args: text: a string. Returns: A db.Text instance. """ if isinstance(text, unicode): # A TypeError is raised if text is unicode and an encoding is given. return db.Text(text) else: try: return db.Text(text, encoding='utf-8') except UnicodeDecodeError: return db.Text(text, encoding='latin-1') def unify_linebreaks(text): """Helper to return a string with all line breaks converted to LF. Args: text: a string. Returns: A string with all line breaks converted to LF. """ return text.replace('\r\n', '\n').replace('\r', '\n')
[ [ 8, 0, 0.1562, 0.0104, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1771, 0.0104, 0, 0.66, 0.1667, 857, 0, 1, 0, 0, 857, 0, 0 ], [ 1, 0, 0.1979, 0.0104, 0, 0.66...
[ "\"\"\"Collection of helper functions.\"\"\"", "import urlparse", "from google.appengine.ext import db", "from codereview.exceptions import FetchError", "def make_url(base, filename, rev):\n \"\"\"Helper to construct the URL to fetch.\n\n Args:\n base: The base property of the Issue to which the Patch ...
# Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Exception classes.""" class RietveldError(Exception): """Base class for all exceptions in this application.""" class FetchError(RietveldError): """Exception raised when fetching of remote files fails."""
[ [ 8, 0, 0.6522, 0.0435, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.8043, 0.087, 0, 0.66, 0.5, 654, 0, 0, 0, 0, 645, 0, 0 ], [ 8, 1, 0.8261, 0.0435, 1, 0.55, ...
[ "\"\"\"Exception classes.\"\"\"", "class RietveldError(Exception):\n \"\"\"Base class for all exceptions in this application.\"\"\"", " \"\"\"Base class for all exceptions in this application.\"\"\"", "class FetchError(RietveldError):\n \"\"\"Exception raised when fetching of remote files fails.\"\"\"", ...
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Django template library for Rietveld.""" import cgi from google.appengine.api import memcache from google.appengine.api import users import django.template import django.utils.safestring from django.core.urlresolvers import reverse from codereview import models register = django.template.Library() user_cache = {} def get_links_for_users(user_emails): """Return a dictionary of email->link to user page and fill caches.""" link_dict = {} remaining_emails = set(user_emails) # initialize with email usernames for email in remaining_emails: nick = email.split('@', 1)[0] link_dict[email] = cgi.escape(nick) # look in the local cache for email in remaining_emails: if email in user_cache: link_dict[email] = user_cache[email] remaining_emails = remaining_emails - set(user_cache) if not remaining_emails: return link_dict # then look in memcache memcache_results = memcache.get_multi(remaining_emails, key_prefix="show_user:") for email in memcache_results: link_dict[email] = memcache_results[email] user_cache[email] = memcache_results[email] remaining_emails = remaining_emails - set(memcache_results) if not remaining_emails: return link_dict # and finally hit the datastore accounts = models.Account.get_accounts_for_emails(remaining_emails) for account in accounts: if account and account.user_has_selected_nickname: ret = ('<a href="%s" onMouseOver="M_showUserInfoPopup(this)">%s</a>' % 
(reverse('codereview.views.show_user', args=[account.nickname]), cgi.escape(account.nickname))) link_dict[account.email] = ret datastore_results = dict((e, link_dict[e]) for e in remaining_emails) memcache.set_multi(datastore_results, 300, key_prefix='show_user:') user_cache.update(datastore_results) return link_dict def get_link_for_user(email): """Get a link to a user's profile page.""" links = get_links_for_users([email]) return links[email] @register.filter def show_user(email, arg=None, _autoescape=None, _memcache_results=None): """Render a link to the user's dashboard, with text being the nickname.""" if isinstance(email, users.User): email = email.email() if not arg: user = users.get_current_user() if user is not None and email == user.email(): return 'me' ret = get_link_for_user(email) return django.utils.safestring.mark_safe(ret) @register.filter def show_users(email_list, arg=None): """Render list of links to each user's dashboard.""" new_email_list = [] for email in email_list: if isinstance(email, users.User): email = email.email() new_email_list.append(email) links = get_links_for_users(new_email_list) if not arg: user = users.get_current_user() if user is not None: links[user.email()] = 'me' return django.utils.safestring.mark_safe(', '.join( links[email] for email in email_list)) class UrlAppendViewSettingsNode(django.template.Node): """Django template tag that appends context and column_width parameter. This tag should be used after any URL that requires view settings. Example: <a href='{%url /foo%}{%urlappend_view_settings%}'> The tag tries to get the current column width and context from the template context and if they're present it returns '?param1&param2' otherwise it returns an empty string. 
""" def __init__(self): super(UrlAppendViewSettingsNode, self).__init__() self.view_context = django.template.Variable('context') self.view_colwidth = django.template.Variable('column_width') def render(self, context): """Returns a HTML fragment.""" url_params = [] current_context = -1 try: current_context = self.view_context.resolve(context) except django.template.VariableDoesNotExist: pass if current_context is None: url_params.append('context=') elif isinstance(current_context, int) and current_context > 0: url_params.append('context=%d' % current_context) current_colwidth = None try: current_colwidth = self.view_colwidth.resolve(context) except django.template.VariableDoesNotExist: pass if current_colwidth is not None: url_params.append('column_width=%d' % current_colwidth) if url_params: return '?%s' % '&'.join(url_params) return '' @register.tag def urlappend_view_settings(_parser, _token): """The actual template tag.""" return UrlAppendViewSettingsNode() def get_nickname(email, never_me=False, request=None): """Return a nickname for an email address. If 'never_me' is True, 'me' is not returned if 'email' belongs to the current logged in user. If 'request' is a HttpRequest, it is used to cache the nickname returned by models.Account.get_nickname_for_email(). """ if isinstance(email, users.User): email = email.email() if not never_me: if request is not None: user = request.user else: user = users.get_current_user() if user is not None and email == user.email(): return 'me' if request is None: return models.Account.get_nickname_for_email(email) # _nicknames is injected into request as a cache. # TODO(maruel): Use memcache instead. 
# Access to a protected member _nicknames of a client class # pylint: disable=W0212 if getattr(request, '_nicknames', None) is None: request._nicknames = {} if email in request._nicknames: return request._nicknames[email] result = models.Account.get_nickname_for_email(email) request._nicknames[email] = result return result class NicknameNode(django.template.Node): """Renders a nickname for a given email address. The return value is cached if a HttpRequest is available in a 'request' template variable. The template tag accepts one or two arguments. The first argument is the template variable for the email address. If the optional second argument evaluates to True, 'me' as nickname is never rendered. Example usage: {% cached_nickname msg.sender %} {% cached_nickname msg.sender True %} """ def __init__(self, email_address, never_me=''): """Constructor. 'email_address' is the name of the template variable that holds an email address. If 'never_me' evaluates to True, 'me' won't be returned. """ super(NicknameNode, self).__init__() self.email_address = django.template.Variable(email_address) self.never_me = bool(never_me.strip()) self.is_multi = False def render(self, context): try: email = self.email_address.resolve(context) except django.template.VariableDoesNotExist: return '' request = context.get('request') if self.is_multi: return ', '.join(get_nickname(e, self.never_me, request) for e in email) return get_nickname(email, self.never_me, request) @register.tag def nickname(_parser, token): """Almost the same as nickname filter but the result is cached.""" try: _, email_address, never_me = token.split_contents() except ValueError: try: _, email_address = token.split_contents() never_me = '' except ValueError: raise django.template.TemplateSyntaxError( "%r requires exactly one or two arguments" % token.contents.split()[0]) return NicknameNode(email_address, never_me) @register.tag def nicknames(parser, token): """Wrapper for nickname tag with is_multi flag enabled.""" 
node = nickname(parser, token) node.is_multi = True return node
[ [ 8, 0, 0.0575, 0.0038, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0651, 0.0038, 0, 0.66, 0.0526, 934, 0, 1, 0, 0, 934, 0, 0 ], [ 1, 0, 0.0728, 0.0038, 0, 0.66...
[ "\"\"\"Django template library for Rietveld.\"\"\"", "import cgi", "from google.appengine.api import memcache", "from google.appengine.api import users", "import django.template", "import django.utils.safestring", "from django.core.urlresolvers import reverse", "from codereview import models", "regi...
# Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Diff rendering in HTML for Rietveld.""" import cgi import difflib import re from google.appengine.api import users from django.conf import settings from django.template import loader, RequestContext from codereview import intra_region_diff from codereview import models from codereview import patching from codereview import utils # NOTE: The SplitPatch function is duplicated in upload.py, keep them in sync. def SplitPatch(data): """Splits a patch into separate pieces for each file. Args: data: A string containing the output of svn diff. Returns: A list of 2-tuple (filename, text) where text is the svn diff output pertaining to filename. """ patches = [] filename = None diff = [] for line in data.splitlines(True): new_filename = None if line.startswith('Index:'): _, new_filename = line.split(':', 1) new_filename = new_filename.strip() elif line.startswith('Property changes on:'): _, temp_filename = line.split(':', 1) # When a file is modified, paths use '/' between directories, however # when a property is modified '\' is used on Windows. Make them the same # otherwise the file shows up twice. temp_filename = temp_filename.strip().replace('\\', '/') if temp_filename != filename: # File has property changes but no modifications, create a new diff. 
new_filename = temp_filename if new_filename: if filename and diff: patches.append((filename, ''.join(diff))) filename = new_filename diff = [line] continue if diff is not None: diff.append(line) if filename and diff: patches.append((filename, ''.join(diff))) return patches def ParsePatchSet(patchset): """Patch a patch set into individual patches. Args: patchset: a models.PatchSet instance. Returns: A list of models.Patch instances. """ patches = [] for filename, text in SplitPatch(patchset.data): patches.append(models.Patch(patchset=patchset, text=utils.to_dbtext(text), filename=filename, parent=patchset)) return patches def RenderDiffTableRows(request, old_lines, chunks, patch, colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False, context=settings.DEFAULT_CONTEXT): """Render the HTML table rows for a side-by-side diff for a patch. Args: request: Django Request object. old_lines: List of lines representing the original file. chunks: List of chunks as returned by patching.ParsePatchToChunks(). patch: A models.Patch instance. colwidth: Optional column width (default 80). debug: Optional debugging flag (default False). context: Maximum number of rows surrounding a change (default CONTEXT). Yields: Strings, each of which represents the text rendering one complete pair of lines of the side-by-side diff, possibly including comments. Each yielded string may consist of several <tr> elements. """ rows = _RenderDiffTableRows(request, old_lines, chunks, patch, colwidth, debug) return _CleanupTableRowsGenerator(rows, context) def RenderDiff2TableRows(request, old_lines, old_patch, new_lines, new_patch, colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False, context=settings.DEFAULT_CONTEXT): """Render the HTML table rows for a side-by-side diff between two patches. Args: request: Django Request object. old_lines: List of lines representing the patched file on the left. old_patch: The models.Patch instance corresponding to old_lines. 
new_lines: List of lines representing the patched file on the right. new_patch: The models.Patch instance corresponding to new_lines. colwidth: Optional column width (default 80). debug: Optional debugging flag (default False). context: Maximum number of visible context lines (default settings.DEFAULT_CONTEXT). Yields: Strings, each of which represents the text rendering one complete pair of lines of the side-by-side diff, possibly including comments. Each yielded string may consist of several <tr> elements. """ rows = _RenderDiff2TableRows(request, old_lines, old_patch, new_lines, new_patch, colwidth, debug) return _CleanupTableRowsGenerator(rows, context) def _CleanupTableRowsGenerator(rows, context): """Cleanup rows returned by _TableRowGenerator for output. Args: rows: List of tuples (tag, text) context: Maximum number of visible context lines. Yields: Rows marked as 'equal' are possibly contracted using _ShortenBuffer(). Stops on rows marked as 'error'. """ buffer = [] for tag, text in rows: if tag == 'equal': buffer.append(text) continue else: for t in _ShortenBuffer(buffer, context): yield t buffer = [] yield text if tag == 'error': yield None break if buffer: for t in _ShortenBuffer(buffer, context): yield t def _ShortenBuffer(buffer, context): """Render a possibly contracted series of HTML table rows. Args: buffer: a list of strings representing HTML table rows. context: Maximum number of visible context lines. If None all lines are returned. Yields: If the buffer has fewer than 3 times context items, yield all the items. Otherwise, yield the first context items, a single table row representing the contraction, and the last context items. """ if context is None or len(buffer) < 3*context: for t in buffer: yield t else: last_id = None for t in buffer[:context]: m = re.match('^<tr( name="hook")? 
id="pair-(?P<rowcount>\d+)">', t) if m: last_id = int(m.groupdict().get("rowcount")) yield t skip = len(buffer) - 2*context expand_link = [] if skip > 3*context: expand_link.append(('<a href="javascript:M_expandSkipped(%(before)d, ' '%(after)d, \'t\', %(skip)d)">' 'Expand %(context)d before' '</a> | ')) expand_link.append(('<a href="javascript:M_expandSkipped(%(before)d, ' '%(after)d, \'a\', %(skip)d)">Expand all</a>')) if skip > 3*context: expand_link.append((' | ' '<a href="javascript:M_expandSkipped(%(before)d, ' '%(after)d, \'b\', %(skip)d)">' 'Expand %(context)d after' '</a>')) expand_link = ''.join(expand_link) % {'before': last_id+1, 'after': last_id+skip, 'skip': last_id, 'context': max(context, None)} yield ('<tr id="skip-%d"><td colspan="2" align="center" ' 'style="background:lightblue">' '(...skipping <span id="skipcount-%d">%d</span> matching lines...) ' '<span id="skiplinks-%d">%s</span> ' '<span id="skiploading-%d" style="visibility:hidden;">Loading...' '</span>' '</td></tr>\n' % (last_id, last_id, skip, last_id, expand_link, last_id)) for t in buffer[-context:]: yield t def _RenderDiff2TableRows(request, old_lines, old_patch, new_lines, new_patch, colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False): """Internal version of RenderDiff2TableRows(). Args: The same as for RenderDiff2TableRows. Yields: Tuples (tag, row) where tag is an indication of the row type. """ old_dict = {} new_dict = {} for patch, dct in [(old_patch, old_dict), (new_patch, new_dict)]: # XXX GQL doesn't support OR yet... Otherwise we'd be using that. 
for comment in models.Comment.gql( 'WHERE patch = :1 AND left = FALSE ORDER BY date', patch): if comment.draft and comment.author != request.user: continue # Only show your own drafts comment.complete() lst = dct.setdefault(comment.lineno, []) lst.append(comment) return _TableRowGenerator(old_patch, old_dict, len(old_lines)+1, 'new', new_patch, new_dict, len(new_lines)+1, 'new', _GenerateTriples(old_lines, new_lines), colwidth, debug, request) def _GenerateTriples(old_lines, new_lines): """Helper for _RenderDiff2TableRows yielding input for _TableRowGenerator. Args: old_lines: List of lines representing the patched file on the left. new_lines: List of lines representing the patched file on the right. Yields: Tuples (tag, old_slice, new_slice) where tag is a tag as returned by difflib.SequenceMatchser.get_opcodes(), and old_slice and new_slice are lists of lines taken from old_lines and new_lines. """ sm = difflib.SequenceMatcher(None, old_lines, new_lines) for tag, i1, i2, j1, j2 in sm.get_opcodes(): yield tag, old_lines[i1:i2], new_lines[j1:j2] def _GetComments(request): """Helper that returns comments for a patch. Args: request: Django Request object. Returns: A 2-tuple of (old, new) where old/new are dictionaries that holds comments for that file, mapping from line number to a Comment entity. """ old_dict = {} new_dict = {} # XXX GQL doesn't support OR yet... Otherwise we'd be using # .gql('WHERE patch = :1 AND (draft = FALSE OR author = :2) ORDER BY data', # patch, request.user) for comment in models.Comment.gql('WHERE patch = :1 ORDER BY date', request.patch): if comment.draft and comment.author != request.user: continue # Only show your own drafts comment.complete() if comment.left: dct = old_dict else: dct = new_dict dct.setdefault(comment.lineno, []).append(comment) return old_dict, new_dict def _RenderDiffTableRows(request, old_lines, chunks, patch, colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False): """Internal version of RenderDiffTableRows(). 
Args: The same as for RenderDiffTableRows. Yields: Tuples (tag, row) where tag is an indication of the row type. """ old_dict = {} new_dict = {} if patch: old_dict, new_dict = _GetComments(request) old_max, new_max = _ComputeLineCounts(old_lines, chunks) return _TableRowGenerator(patch, old_dict, old_max, 'old', patch, new_dict, new_max, 'new', patching.PatchChunks(old_lines, chunks), colwidth, debug, request) def _TableRowGenerator(old_patch, old_dict, old_max, old_snapshot, new_patch, new_dict, new_max, new_snapshot, triple_iterator, colwidth=settings.DEFAULT_COLUMN_WIDTH, debug=False, request=None): """Helper function to render side-by-side table rows. Args: old_patch: First models.Patch instance. old_dict: Dictionary with line numbers as keys and comments as values (left) old_max: Line count of the patch on the left. old_snapshot: A tag used in the comments form. new_patch: Second models.Patch instance. new_dict: Same as old_dict, but for the right side. new_max: Line count of the patch on the right. new_snapshot: A tag used in the comments form. triple_iterator: Iterator that yields (tag, old, new) triples. colwidth: Optional column width (default 80). debug: Optional debugging flag (default False). Yields: Tuples (tag, row) where tag is an indication of the row type and row is an HTML fragment representing one or more <td> elements. """ diff_params = intra_region_diff.GetDiffParams(dbg=debug) ndigits = 1 + max(len(str(old_max)), len(str(new_max))) indent = 1 + ndigits old_offset = new_offset = 0 row_count = 0 # Render a row with a message if a side is empty or both sides are equal. 
if old_patch == new_patch and (old_max == 0 or new_max == 0): if old_max == 0: msg_old = '(Empty)' else: msg_old = '' if new_max == 0: msg_new = '(Empty)' else: msg_new = '' yield '', ('<tr><td class="info">%s</td>' '<td class="info">%s</td></tr>' % (msg_old, msg_new)) elif old_patch is None or new_patch is None: msg_old = msg_new = '' if old_patch is None: msg_old = '(no file at all)' if new_patch is None: msg_new = '(no file at all)' yield '', ('<tr><td class="info">%s</td>' '<td class="info">%s</td></tr>' % (msg_old, msg_new)) elif old_patch != new_patch and old_patch.lines == new_patch.lines: yield '', ('<tr><td class="info" colspan="2">' '(Both sides are equal)</td></tr>') for tag, old, new in triple_iterator: if tag.startswith('error'): yield 'error', '<tr><td><h3>%s</h3></td></tr>\n' % cgi.escape(tag) return old1 = old_offset old_offset = old2 = old1 + len(old) new1 = new_offset new_offset = new2 = new1 + len(new) old_buff = [] new_buff = [] frag_list = [] do_ir_diff = tag == 'replace' and intra_region_diff.CanDoIRDiff(old, new) for i in xrange(max(len(old), len(new))): row_count += 1 old_lineno = old1 + i + 1 new_lineno = new1 + i + 1 old_valid = old1+i < old2 new_valid = new1+i < new2 # Start rendering the first row frags = [] if i == 0 and tag != 'equal': # Mark the first row of each non-equal chunk as a 'hook'. frags.append('<tr name="hook"') else: frags.append('<tr') frags.append(' id="pair-%d">' % row_count) old_intra_diff = '' new_intra_diff = '' if old_valid: old_intra_diff = old[i] if new_valid: new_intra_diff = new[i] frag_list.append(frags) if do_ir_diff: # Don't render yet. Keep saving state necessary to render the whole # region until we have encountered all the lines in the region. 
old_buff.append([old_valid, old_lineno, old_intra_diff]) new_buff.append([new_valid, new_lineno, new_intra_diff]) else: # We render line by line as usual if do_ir_diff is false old_intra_diff = intra_region_diff.Break( old_intra_diff, 0, colwidth, "\n" + " "*indent) new_intra_diff = intra_region_diff.Break( new_intra_diff, 0, colwidth, "\n" + " "*indent) old_buff_out = [[old_valid, old_lineno, (old_intra_diff, True, None)]] new_buff_out = [[new_valid, new_lineno, (new_intra_diff, True, None)]] for tg, frag in _RenderDiffInternal(old_buff_out, new_buff_out, ndigits, tag, frag_list, do_ir_diff, old_dict, new_dict, old_patch, new_patch, old_snapshot, new_snapshot, debug, request): yield tg, frag frag_list = [] if do_ir_diff: # So this was a replace block which means that the whole region still # needs to be rendered. old_lines = [b[2] for b in old_buff] new_lines = [b[2] for b in new_buff] ret = intra_region_diff.IntraRegionDiff(old_lines, new_lines, diff_params) old_chunks, new_chunks, ratio = ret old_tag = 'old' new_tag = 'new' old_diff_out = intra_region_diff.RenderIntraRegionDiff( old_lines, old_chunks, old_tag, ratio, limit=colwidth, indent=indent, mark_tabs=True, dbg=debug) new_diff_out = intra_region_diff.RenderIntraRegionDiff( new_lines, new_chunks, new_tag, ratio, limit=colwidth, indent=indent, mark_tabs=True, dbg=debug) for (i, b) in enumerate(old_buff): b[2] = old_diff_out[i] for (i, b) in enumerate(new_buff): b[2] = new_diff_out[i] for tg, frag in _RenderDiffInternal(old_buff, new_buff, ndigits, tag, frag_list, do_ir_diff, old_dict, new_dict, old_patch, new_patch, old_snapshot, new_snapshot, debug, request): yield tg, frag old_buff = [] new_buff = [] def _RenderDiffInternal(old_buff, new_buff, ndigits, tag, frag_list, do_ir_diff, old_dict, new_dict, old_patch, new_patch, old_snapshot, new_snapshot, debug, request): """Helper for _TableRowGenerator().""" obegin = (intra_region_diff.BEGIN_TAG % intra_region_diff.COLOR_SCHEME['old']['match']) nbegin = 
(intra_region_diff.BEGIN_TAG % intra_region_diff.COLOR_SCHEME['new']['match']) oend = intra_region_diff.END_TAG nend = oend user = users.get_current_user() for i in xrange(len(old_buff)): tg = tag old_valid, old_lineno, old_out = old_buff[i] new_valid, new_lineno, new_out = new_buff[i] old_intra_diff, old_has_newline, old_debug_info = old_out new_intra_diff, new_has_newline, new_debug_info = new_out frags = frag_list[i] # Render left text column frags.append(_RenderDiffColumn(old_valid, tag, ndigits, old_lineno, obegin, oend, old_intra_diff, do_ir_diff, old_has_newline, 'old')) # Render right text column frags.append(_RenderDiffColumn(new_valid, tag, ndigits, new_lineno, nbegin, nend, new_intra_diff, do_ir_diff, new_has_newline, 'new')) # End rendering the first row frags.append('</tr>\n') if debug: frags.append('<tr>') if old_debug_info: frags.append('<td class="debug-info">%s</td>' % old_debug_info.replace('\n', '<br>')) else: frags.append('<td></td>') if new_debug_info: frags.append('<td class="debug-info">%s</td>' % new_debug_info.replace('\n', '<br>')) else: frags.append('<td></td>') frags.append('</tr>\n') if old_patch or new_patch: # Start rendering the second row if ((old_valid and old_lineno in old_dict) or (new_valid and new_lineno in new_dict)): tg += '_comment' frags.append('<tr class="inline-comments" name="hook">') else: frags.append('<tr class="inline-comments">') # Render left inline comments frags.append(_RenderInlineComments(old_valid, old_lineno, old_dict, user, old_patch, old_snapshot, 'old', request)) # Render right inline comments frags.append(_RenderInlineComments(new_valid, new_lineno, new_dict, user, new_patch, new_snapshot, 'new', request)) # End rendering the second row frags.append('</tr>\n') # Yield the combined fragments yield tg, ''.join(frags) def _RenderDiffColumn(line_valid, tag, ndigits, lineno, begin, end, intra_diff, do_ir_diff, has_newline, prefix): """Helper function for _RenderDiffInternal(). Returns: A rendered column. 
""" if line_valid: cls_attr = '%s%s' % (prefix, tag) if tag == 'equal': lno = '%*d' % (ndigits, lineno) else: lno = _MarkupNumber(ndigits, lineno, 'u') if tag == 'replace': col_content = ('%s%s %s%s' % (begin, lno, end, intra_diff)) # If IR diff has been turned off or there is no matching new line at # the end then switch to dark background CSS style. if not do_ir_diff or not has_newline: cls_attr = cls_attr + '1' else: col_content = '%s %s' % (lno, intra_diff) return '<td class="%s" id="%scode%d">%s</td>' % (cls_attr, prefix, lineno, col_content) else: return '<td class="%sblank"></td>' % prefix def _RenderInlineComments(line_valid, lineno, data, user, patch, snapshot, prefix, request): """Helper function for _RenderDiffInternal(). Returns: Rendered comments. """ comments = [] if line_valid: comments.append('<td id="%s-line-%s">' % (prefix, lineno)) if lineno in data: comments.append( _ExpandTemplate('inline_comment.html', request, user=user, patch=patch, patchset=patch.patchset, issue=patch.patchset.issue, snapshot=snapshot, side='a' if prefix == 'old' else 'b', comments=data[lineno], lineno=lineno, )) comments.append('</td>') else: comments.append('<td></td>') return ''.join(comments) def RenderUnifiedTableRows(request, parsed_lines): """Render the HTML table rows for a unified diff for a patch. Args: request: Django Request object. parsed_lines: List of tuples for each line that contain the line number, if they exist, for the old and new file. Returns: A list of html table rows. """ old_dict, new_dict = _GetComments(request) rows = [] for old_line_no, new_line_no, line_text in parsed_lines: row1_id = row2_id = '' # When a line is unchanged (i.e. both old_line_no and new_line_no aren't 0) # pick the old column line numbers when adding a comment. 
if old_line_no: row1_id = 'id="oldcode%d"' % old_line_no row2_id = 'id="old-line-%d"' % old_line_no elif new_line_no: row1_id = 'id="newcode%d"' % new_line_no row2_id = 'id="new-line-%d"' % new_line_no if line_text[0] == '+': style = 'udiffadd' elif line_text[0] == '-': style = 'udiffremove' else: style = '' rows.append('<tr><td class="udiff %s" %s>%s</td></tr>' % (style, row1_id, cgi.escape(line_text))) frags = [] if old_line_no in old_dict or new_line_no in new_dict: frags.append('<tr class="inline-comments" name="hook">') if old_line_no in old_dict: dct = old_dict line_no = old_line_no snapshot = 'old' else: dct = new_dict line_no = new_line_no snapshot = 'new' frags.append(_RenderInlineComments(True, line_no, dct, request.user, request.patch, snapshot, snapshot, request)) else: frags.append('<tr class="inline-comments">') frags.append('<td ' + row2_id +'></td>') frags.append('</tr>') rows.append(''.join(frags)) return rows def _ComputeLineCounts(old_lines, chunks): """Compute the length of the old and new sides of a diff. Args: old_lines: List of lines representing the original file. chunks: List of chunks as returned by patching.ParsePatchToChunks(). Returns: A tuple (old_len, new_len) representing len(old_lines) and len(new_lines), where new_lines is the list representing the result of applying the patch chunks to old_lines, however, without actually computing new_lines. """ old_len = len(old_lines) new_len = old_len if chunks: (_, old_b), (_, new_b), old_lines, _ = chunks[-1] new_len += new_b - old_b return old_len, new_len def _MarkupNumber(ndigits, number, tag): """Format a number in HTML in a given width with extra markup. Args: ndigits: the total width available for formatting number: the number to be formatted tag: HTML tag name, e.g. 'u' Returns: An HTML string that displays as ndigits wide, with the number right-aligned and surrounded by an HTML tag; for example, _MarkupNumber(42, 4, 'u') returns ' <u>42</u>'. 
""" formatted_number = str(number) space_prefix = ' ' * (ndigits - len(formatted_number)) return '%s<%s>%s</%s>' % (space_prefix, tag, formatted_number, tag) def _ExpandTemplate(name, request, **params): """Wrapper around django.template.loader.render_to_string(). For convenience, this takes keyword arguments instead of a dict. """ rslt = loader.render_to_string(name, params, context_instance=RequestContext(request)) return rslt.encode('utf-8')
[ [ 8, 0, 0.0214, 0.0014, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0242, 0.0014, 0, 0.66, 0.0357, 934, 0, 1, 0, 0, 934, 0, 0 ], [ 1, 0, 0.0256, 0.0014, 0, 0.66...
[ "\"\"\"Diff rendering in HTML for Rietveld.\"\"\"", "import cgi", "import difflib", "import re", "from google.appengine.api import users", "from django.conf import settings", "from django.template import loader, RequestContext", "from codereview import intra_region_diff", "from codereview import mod...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ """Mimic pyquick exercise -- optional extra exercise. Google's Python Class Read in the file specified on the command line. Do a simple split() on whitespace to obtain all the words in the file. Rather than read the file line by line, it's easier to read it into one giant string and split it once. Build a "mimic" dict that maps each word that appears in the file to a list of all the words that immediately follow that word in the file. The list of words can be be in any order and should include duplicates. So for example the key "and" might have the list ["then", "best", "then", "after", ...] listing all the words which came after "and" in the text. We'll say that the empty string is what comes before the first word in the file. With the mimic dict, it's fairly easy to emit random text that mimics the original. Print a word, then look up what words might come next and pick one at random as the next work. Use the empty string as the first word to prime things. If we ever get stuck with a word that is not in the dict, go back to the empty string to keep things moving. Note: the standard python module 'random' includes a random.choice(list) method which picks a random element from a non-empty list. For fun, feed your program to itself as input. Could work on getting it to put in linebreaks around 70 columns, so the output looks better. 
""" import random import sys import re def mimic_dict(filename): """Returns mimic dict mapping each word to list of words which follow it.""" f = open(filename, 'r') str = f.read() if len(str) == 0: return -1 str = re.sub("[!_?/\|<>{}():;,.@#$%^&*--+='~`]","",str) str = str.lower().split() ans = {"":[str[0]]} index=0 for word in str[:-1]: if ans.get(word): ans[word].append(str[index+1]) else: ans[word]=[str[index+1]] index+=1 return ans def print_mimic(mimic_dict, word): """Given mimic dict and start word, prints 200 random words.""" temp = '' print word, for i in range(200): if mimic_dict.get(word) == None: word = '' temp = random.choice(mimic_dict[word]) print temp, word = temp return # Provided main(), calls mimic_dict() and mimic() def main(): if len(sys.argv) != 2: print 'usage: ./mimic.py file-to-read' sys.exit(1) dict = mimic_dict(sys.argv[1]) print_mimic(dict, '') if __name__ == '__main__': main()
[ [ 8, 0, 0.2802, 0.3736, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.4835, 0.011, 0, 0.66, 0.1429, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.4945, 0.011, 0, 0.66, ...
[ "\"\"\"Mimic pyquick exercise -- optional extra exercise.\nGoogle's Python Class\n\nRead in the file specified on the command line.\nDo a simple split() on whitespace to obtain all the words in the file.\nRather than read the file line by line, it's easier to read\nit into one giant string and split it once.", "i...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Additional basic list exercises # D. Given a list of numbers, return a list where # all adjacent == elements have been reduced to a single element, # so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or # modify the passed in list. def remove_adjacent(nums): ans=[] i=0 while i < len(nums): if nums[i] != nums[i-1]: ans.append(nums[i]) i+=1 return ans # E. Given two lists sorted in increasing order, create and return a merged # list of all the elements in sorted order. You may modify the passed in lists. # Ideally, the solution should work in "linear" time, making a single # pass of both lists. def linear_merge(list1, list2): ans=[] i=0 j=0 while i+j < len(list1)+len(list2): if i < len(list1) and j < len(list2): if list1[i] < list2[j]: ans.append(list1[i]) i+=1 else: ans.append(list2[j]) j+=1 elif j >= len(list2): ans.append(list1[i]) i+=1 else: ans.append(list2[j]) j+=1 return ans # Note: the solution above is kind of cute, but unforunately list.pop(0) # is not constant time with the standard python list implementation, so # the above is not strictly linear time. # An alternate approach uses pop(-1) to remove the endmost elements # from each list, building a solution list which is backwards. # Then use reversed() to put the result back in the correct order. That # solution works in linear time, but is more ugly. # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Calls the above functions with interesting inputs. 
def main(): print 'remove_adjacent' test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3]) test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3]) test(remove_adjacent([]), []) print print 'linear_merge' test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']), ['aa', 'bb', 'cc', 'xx', 'zz']) test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']), ['aa', 'bb', 'cc', 'xx', 'zz']) test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']), ['aa', 'aa', 'aa', 'bb', 'bb']) if __name__ == '__main__': main()
[ [ 2, 0, 0.2202, 0.0952, 0, 0.66, 0, 855, 0, 1, 1, 0, 0, 0, 2 ], [ 14, 1, 0.1905, 0.0119, 1, 0.37, 0, 276, 0, 0, 0, 0, 0, 5, 0 ], [ 14, 1, 0.2024, 0.0119, 1, 0.37, ...
[ "def remove_adjacent(nums):\n ans=[]\n i=0\n while i < len(nums):\n if nums[i] != nums[i-1]:\n ans.append(nums[i])\n i+=1\n return ans", " ans=[]", " i=0", " while i < len(nums):\n if nums[i] != nums[i-1]:\n ans.append(nums[i])\n i+=1", " if nums[i] != nums[i-1]:\n ans.a...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ """Wordcount exercise Google's Python class The main() below is already defined and complete. It calls print_words() and print_top() functions which you write. 1. For the --count flag, implement a print_words(filename) function that counts how often each word appears in the text and prints: word1 count1 word2 count2 ... Print the above list in order sorted by word (python will sort punctuation to come before letters -- that's fine). Store all the words as lowercase, so 'The' and 'the' count as the same word. 2. For the --topcount flag, implement a print_top(filename) which is similar to print_words() but which prints just the top 20 most common words sorted so the most common word is first, then the next most common, and so on. Use str.split() (no arguments) to split on all whitespace. Workflow: don't build the whole program at once. Get it to an intermediate milestone and print your data structure and sys.exit(0). When that's working, try for the next milestone. Optional: define a helper function to avoid code duplication inside print_words() and print_top(). 
""" import sys def read_file(filename): f = open(filename, 'r') ans=[] buf = '' temp= '' for line in f: for i in line: if i == ' ' or i == '\n': buf = buf.lower() ans += buf.split() buf='' else: buf += i f.close dict = {} for i in ans: if i in dict: dict[i]+=1 else: dict[i]=1 return dict def print_words(filename): f = read_file(filename) for key in sorted(f.keys(), reverse = True, key=f.get): print key,' ', f.get(key) def print_top(filename): dict = read_file(filename) count=0 for i in sorted(dict.keys(), reverse = True, key=dict.get): if count < len(dict) and count < 20: print i, dict.get(i) count+=1 else: break # +++your code here+++ # Define print_words(filename) and print_top(filename) functions. # You could write a helper utility function that reads a file # and builds and returns a word/count dict for it. # Then print_words() and print_top() can just call the utility function. ### # This basic command line argument parsing code is provided and # calls the print_words() and print_top() functions which you must define. def main(): if len(sys.argv) != 3: print 'usage: ./wordcount.py {--count | --topcount} file' sys.exit(1) option = sys.argv[1] filename = sys.argv[2] if option == '--count': print_words(filename) elif option == '--topcount': print_top(filename) else: print 'unknown option: ' + option sys.exit(1) if __name__ == '__main__': main()
[ [ 8, 0, 0.2238, 0.2857, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.381, 0.0095, 0, 0.66, 0.1667, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 2, 0, 0.4952, 0.2, 0, 0.66, ...
[ "\"\"\"Wordcount exercise\nGoogle's Python class\n\nThe main() below is already defined and complete. It calls print_words()\nand print_top() functions which you write.\n\n1. For the --count flag, implement a print_words(filename) function that counts\nhow often each word appears in the text and prints:", "import...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ """Mimic pyquick exercise -- optional extra exercise. Google's Python Class Read in the file specified on the command line. Do a simple split() on whitespace to obtain all the words in the file. Rather than read the file line by line, it's easier to read it into one giant string and split it once. Build a "mimic" dict that maps each word that appears in the file to a list of all the words that immediately follow that word in the file. The list of words can be be in any order and should include duplicates. So for example the key "and" might have the list ["then", "best", "then", "after", ...] listing all the words which came after "and" in the text. We'll say that the empty string is what comes before the first word in the file. With the mimic dict, it's fairly easy to emit random text that mimics the original. Print a word, then look up what words might come next and pick one at random as the next work. Use the empty string as the first word to prime things. If we ever get stuck with a word that is not in the dict, go back to the empty string to keep things moving. Note: the standard python module 'random' includes a random.choice(list) method which picks a random element from a non-empty list. For fun, feed your program to itself as input. Could work on getting it to put in linebreaks around 70 columns, so the output looks better. 
""" import random import sys import re def mimic_dict(filename): """Returns mimic dict mapping each word to list of words which follow it.""" f = open(filename, 'r') str = f.read() if len(str) == 0: return -1 str = re.sub("[!_?/\|<>{}():;,.@#$%^&*--+='~`]","",str) str = str.lower().split() ans = {"":[str[0]]} index=0 for word in str[:-1]: if ans.get(word): ans[word].append(str[index+1]) else: ans[word]=[str[index+1]] index+=1 return ans def print_mimic(mimic_dict, word): """Given mimic dict and start word, prints 200 random words.""" temp = '' print word, for i in range(200): if mimic_dict.get(word) == None: word = '' temp = random.choice(mimic_dict[word]) print temp, word = temp return # Provided main(), calls mimic_dict() and mimic() def main(): if len(sys.argv) != 2: print 'usage: ./mimic.py file-to-read' sys.exit(1) dict = mimic_dict(sys.argv[1]) print_mimic(dict, '') if __name__ == '__main__': main()
[ [ 8, 0, 0.2802, 0.3736, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.4835, 0.011, 0, 0.66, 0.1429, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.4945, 0.011, 0, 0.66, ...
[ "\"\"\"Mimic pyquick exercise -- optional extra exercise.\nGoogle's Python Class\n\nRead in the file specified on the command line.\nDo a simple split() on whitespace to obtain all the words in the file.\nRather than read the file line by line, it's easier to read\nit into one giant string and split it once.", "i...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Additional basic list exercises # D. Given a list of numbers, return a list where # all adjacent == elements have been reduced to a single element, # so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or # modify the passed in list. def remove_adjacent(nums): ans=[] i=0 while i < len(nums): if nums[i] != nums[i-1]: ans.append(nums[i]) i+=1 return ans # E. Given two lists sorted in increasing order, create and return a merged # list of all the elements in sorted order. You may modify the passed in lists. # Ideally, the solution should work in "linear" time, making a single # pass of both lists. def linear_merge(list1, list2): ans=[] i=0 j=0 while i+j < len(list1)+len(list2): if i < len(list1) and j < len(list2): if list1[i] < list2[j]: ans.append(list1[i]) i+=1 else: ans.append(list2[j]) j+=1 elif j >= len(list2): ans.append(list1[i]) i+=1 else: ans.append(list2[j]) j+=1 return ans # Note: the solution above is kind of cute, but unforunately list.pop(0) # is not constant time with the standard python list implementation, so # the above is not strictly linear time. # An alternate approach uses pop(-1) to remove the endmost elements # from each list, building a solution list which is backwards. # Then use reversed() to put the result back in the correct order. That # solution works in linear time, but is more ugly. # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Calls the above functions with interesting inputs. 
def main(): print 'remove_adjacent' test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3]) test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3]) test(remove_adjacent([]), []) print print 'linear_merge' test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']), ['aa', 'bb', 'cc', 'xx', 'zz']) test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']), ['aa', 'bb', 'cc', 'xx', 'zz']) test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']), ['aa', 'aa', 'aa', 'bb', 'bb']) if __name__ == '__main__': main()
[ [ 2, 0, 0.2202, 0.0952, 0, 0.66, 0, 855, 0, 1, 1, 0, 0, 0, 2 ], [ 14, 1, 0.1905, 0.0119, 1, 0.96, 0, 276, 0, 0, 0, 0, 0, 5, 0 ], [ 14, 1, 0.2024, 0.0119, 1, 0.96, ...
[ "def remove_adjacent(nums):\n ans=[]\n i=0\n while i < len(nums):\n if nums[i] != nums[i-1]:\n ans.append(nums[i])\n i+=1\n return ans", " ans=[]", " i=0", " while i < len(nums):\n if nums[i] != nums[i-1]:\n ans.append(nums[i])\n i+=1", " if nums[i] != nums[i-1]:\n ans.a...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ """Wordcount exercise Google's Python class The main() below is already defined and complete. It calls print_words() and print_top() functions which you write. 1. For the --count flag, implement a print_words(filename) function that counts how often each word appears in the text and prints: word1 count1 word2 count2 ... Print the above list in order sorted by word (python will sort punctuation to come before letters -- that's fine). Store all the words as lowercase, so 'The' and 'the' count as the same word. 2. For the --topcount flag, implement a print_top(filename) which is similar to print_words() but which prints just the top 20 most common words sorted so the most common word is first, then the next most common, and so on. Use str.split() (no arguments) to split on all whitespace. Workflow: don't build the whole program at once. Get it to an intermediate milestone and print your data structure and sys.exit(0). When that's working, try for the next milestone. Optional: define a helper function to avoid code duplication inside print_words() and print_top(). 
""" import sys def read_file(filename): f = open(filename, 'r') ans=[] buf = '' temp= '' for line in f: for i in line: if i == ' ' or i == '\n': buf = buf.lower() ans += buf.split() buf='' else: buf += i f.close dict = {} for i in ans: if i in dict: dict[i]+=1 else: dict[i]=1 return dict def print_words(filename): f = read_file(filename) for key in sorted(f.keys(), reverse = True, key=f.get): print key,' ', f.get(key) def print_top(filename): dict = read_file(filename) count=0 for i in sorted(dict.keys(), reverse = True, key=dict.get): if count < len(dict) and count < 20: print i, dict.get(i) count+=1 else: break # +++your code here+++ # Define print_words(filename) and print_top(filename) functions. # You could write a helper utility function that reads a file # and builds and returns a word/count dict for it. # Then print_words() and print_top() can just call the utility function. ### # This basic command line argument parsing code is provided and # calls the print_words() and print_top() functions which you must define. def main(): if len(sys.argv) != 3: print 'usage: ./wordcount.py {--count | --topcount} file' sys.exit(1) option = sys.argv[1] filename = sys.argv[2] if option == '--count': print_words(filename) elif option == '--topcount': print_top(filename) else: print 'unknown option: ' + option sys.exit(1) if __name__ == '__main__': main()
[ [ 8, 0, 0.2238, 0.2857, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.381, 0.0095, 0, 0.66, 0.1667, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 2, 0, 0.4952, 0.2, 0, 0.66, ...
[ "\"\"\"Wordcount exercise\nGoogle's Python class\n\nThe main() below is already defined and complete. It calls print_words()\nand print_top() functions which you write.\n\n1. For the --count flag, implement a print_words(filename) function that counts\nhow often each word appears in the text and prints:", "import...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Additional basic list exercises # D. Given a list of numbers, return a list where # all adjacent == elements have been reduced to a single element, # so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or # modify the passed in list. def remove_adjacent(nums): if nums == []: return nums last = nums[0] mlist = [] mlist.append(last) for i in nums[1:]: if i!= last: mlist.append(i) last = i return mlist # E. Given two lists sorted in increasing order, create and return a merged # list of all the elements in sorted order. You may modify the passed in lists. # Ideally, the solution should work in "linear" time, making a single # pass of both lists. def linear_merge(list1, list2): list1.extend(list2) list1.sort() return list1 # Note: the solution above is kind of cute, but unforunately list.pop(0) # is not constant time with the standard python list implementation, so # the above is not strictly linear time. # An alternate approach uses pop(-1) to remove the endmost elements # from each list, building a solution list which is backwards. # Then use reversed() to put the result back in the correct order. That # solution works in linear time, but is more ugly. # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Calls the above functions with interesting inputs. 
def main(): print 'remove_adjacent' test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3]) test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3]) test(remove_adjacent([]), []) print print 'linear_merge' test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']), ['aa', 'bb', 'cc', 'xx', 'zz']) test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']), ['aa', 'bb', 'cc', 'xx', 'zz']) test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']), ['aa', 'aa', 'aa', 'bb', 'bb']) if __name__ == '__main__': main()
[ [ 2, 0, 0.2778, 0.1528, 0, 0.66, 0, 855, 0, 1, 1, 0, 0, 0, 2 ], [ 4, 1, 0.2292, 0.0278, 1, 0.03, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 13, 2, 0.2361, 0.0139, 2, 0.48, ...
[ "def remove_adjacent(nums):\n if nums == []:\n return nums\n last = nums[0]\n mlist = []\n mlist.append(last)\n for i in nums[1:]:\n if i!= last:", " if nums == []:\n return nums", " return nums", " last = nums[0]", " mlist = []", " mlist.append(last)", " for i in nums[1:]:\n if...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ """Wordcount exercise Google's Python class The main() below is already defined and complete. It calls print_words() and print_top() functions which you write. 1. For the --count flag, implement a print_words(filename) function that counts how often each word appears in the text and prints: word1 count1 word2 count2 ... Print the above list in order sorted by word (python will sort punctuation to come before letters -- that's fine). Store all the words as lowercase, so 'The' and 'the' count as the same word. 2. For the --topcount flag, implement a print_top(filename) which is similar to print_words() but which prints just the top 20 most common words sorted so the most common word is first, then the next most common, and so on. Use str.split() (no arguments) to split on all whitespace. Workflow: don't build the whole program at once. Get it to an intermediate milestone and print your data structure and sys.exit(0). When that's working, try for the next milestone. Optional: define a helper function to avoid code duplication inside print_words() and print_top(). """ import sys import re # +++your code here+++ # Define print_words(filename) and print_top(filename) functions. # You could write a helper utility function that reads a file # and builds and returns a word/count dict for it. # Then print_words() and print_top() can just call the utility function. 
def read_file(filename): f=open(filename,'r') text = f.read() text = re.sub("[/.,\';:?!-]","",text) text = text.lower() words = text.split() wordsMap = {} for word in words: if word in wordsMap: wordsMap[word] +=1 else: wordsMap[word] = 1 return wordsMap def print_words(filename): wordsMap = read_file(filename) keys=sorted(wordsMap.keys()) for key in keys: print key, wordsMap[key] return def print_top(filename): wordsMap = read_file(filename) items=sorted(wordsMap.items(),key=lambda (k, v): v,reverse=True) for item in items[:20]: print item[0], item[1] ### # This basic command line argument parsing code is provided and # calls the print_words() and print_top() functions which you must define. def main(): if len(sys.argv) != 3: print 'usage: ./wordcount.py {--count | --topcount} file' sys.exit(1) option = sys.argv[1] filename = sys.argv[2] if option == '--count': print_words(filename) elif option == '--topcount': print_top(filename) else: print 'unknown option: ' + option sys.exit(1) if __name__ == '__main__': main()
[ [ 8, 0, 0.2398, 0.3061, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.4082, 0.0102, 0, 0.66, 0.1429, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4184, 0.0102, 0, 0.66...
[ "\"\"\"Wordcount exercise\nGoogle's Python class\n\nThe main() below is already defined and complete. It calls print_words()\nand print_top() functions which you write.\n\n1. For the --count flag, implement a print_words(filename) function that counts\nhow often each word appears in the text and prints:", "import...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Additional basic list exercises # D. Given a list of numbers, return a list where # all adjacent == elements have been reduced to a single element, # so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or # modify the passed in list. def remove_adjacent(nums): if nums == []: return nums last = nums[0] mlist = [] mlist.append(last) for i in nums[1:]: if i!= last: mlist.append(i) last = i return mlist # E. Given two lists sorted in increasing order, create and return a merged # list of all the elements in sorted order. You may modify the passed in lists. # Ideally, the solution should work in "linear" time, making a single # pass of both lists. def linear_merge(list1, list2): list1.extend(list2) list1.sort() return list1 # Note: the solution above is kind of cute, but unforunately list.pop(0) # is not constant time with the standard python list implementation, so # the above is not strictly linear time. # An alternate approach uses pop(-1) to remove the endmost elements # from each list, building a solution list which is backwards. # Then use reversed() to put the result back in the correct order. That # solution works in linear time, but is more ugly. # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Calls the above functions with interesting inputs. 
def main(): print 'remove_adjacent' test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3]) test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3]) test(remove_adjacent([]), []) print print 'linear_merge' test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']), ['aa', 'bb', 'cc', 'xx', 'zz']) test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']), ['aa', 'bb', 'cc', 'xx', 'zz']) test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']), ['aa', 'aa', 'aa', 'bb', 'bb']) if __name__ == '__main__': main()
[ [ 2, 0, 0.2778, 0.1528, 0, 0.66, 0, 855, 0, 1, 1, 0, 0, 0, 2 ], [ 4, 1, 0.2292, 0.0278, 1, 0.14, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 13, 2, 0.2361, 0.0139, 2, 0.06, ...
[ "def remove_adjacent(nums):\n if nums == []:\n return nums\n last = nums[0]\n mlist = []\n mlist.append(last)\n for i in nums[1:]:\n if i!= last:", " if nums == []:\n return nums", " return nums", " last = nums[0]", " mlist = []", " mlist.append(last)", " for i in nums[1:]:\n if...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ """Wordcount exercise Google's Python class The main() below is already defined and complete. It calls print_words() and print_top() functions which you write. 1. For the --count flag, implement a print_words(filename) function that counts how often each word appears in the text and prints: word1 count1 word2 count2 ... Print the above list in order sorted by word (python will sort punctuation to come before letters -- that's fine). Store all the words as lowercase, so 'The' and 'the' count as the same word. 2. For the --topcount flag, implement a print_top(filename) which is similar to print_words() but which prints just the top 20 most common words sorted so the most common word is first, then the next most common, and so on. Use str.split() (no arguments) to split on all whitespace. Workflow: don't build the whole program at once. Get it to an intermediate milestone and print your data structure and sys.exit(0). When that's working, try for the next milestone. Optional: define a helper function to avoid code duplication inside print_words() and print_top(). """ import sys import re # +++your code here+++ # Define print_words(filename) and print_top(filename) functions. # You could write a helper utility function that reads a file # and builds and returns a word/count dict for it. # Then print_words() and print_top() can just call the utility function. 
def read_file(filename):
    """Read *filename* and return a dict mapping lowercased word -> count.

    Common punctuation is stripped before splitting on whitespace, so
    'The' and 'the.' both count as 'the'.
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        text = f.read()
    text = re.sub("[/.,\';:?!-]", "", text)
    text = text.lower()
    words = text.split()
    wordsMap = {}
    for word in words:
        # dict.get with a default replaces the original if/else branch.
        wordsMap[word] = wordsMap.get(word, 0) + 1
    return wordsMap


def print_words(filename):
    """Print 'word count' for every word in the file, sorted by word."""
    wordsMap = read_file(filename)
    keys = sorted(wordsMap.keys())
    for key in keys:
        # print() with one pre-formatted string is valid on Python 2 and 3.
        print('%s %s' % (key, wordsMap[key]))
    return


def print_top(filename):
    """Print the 20 most common words in the file, most common first."""
    wordsMap = read_file(filename)
    # lambda item: item[1] replaces the Python-2-only tuple-parameter
    # lambda (k, v): v, which is a SyntaxError on Python 3.
    items = sorted(wordsMap.items(), key=lambda item: item[1], reverse=True)
    for item in items[:20]:
        print('%s %s' % (item[0], item[1]))


###

# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Dispatch on the --count / --topcount command line flag."""
    if len(sys.argv) != 3:
        print('usage: ./wordcount.py {--count | --topcount} file')
        sys.exit(1)

    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print('unknown option: ' + option)
        sys.exit(1)

if __name__ == '__main__':
    main()
[ [ 8, 0, 0.2398, 0.3061, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.4082, 0.0102, 0, 0.66, 0.1429, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4184, 0.0102, 0, 0.66...
[ "\"\"\"Wordcount exercise\nGoogle's Python class\n\nThe main() below is already defined and complete. It calls print_words()\nand print_top() functions which you write.\n\n1. For the --count flag, implement a print_words(filename) function that counts\nhow often each word appears in the text and prints:", "import...
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class

Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.

Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.

The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.

With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.

Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.

For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""

import random
import sys
import re


def mimic_dict(filename):
    """Returns mimic dict mapping each word to list of words which follow it.

    The empty-string key maps to the *first word* of the file (a bare
    string, not a list) -- print_mimic() below relies on that.
    NOTE(review): an empty file raises IndexError here; behaviour kept
    as-is, callers are expected to feed non-empty files.
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        text = f.read()
    text = re.sub("[(/.,!?:-;)`]", "", text)
    str1 = text.lower().split()
    mimic = {"": str1[0]}
    suf = []
    # O(n^2): each previously unseen word rescans the whole word list for
    # its followers.  (A dead 'j = i' pre-assignment was removed.)
    for i in range(len(str1)):
        if mimic.get(str1[i]) is None:
            for j in range(len(str1) - 1):
                if str1[i] == str1[j]:
                    suf.append(str1[j + 1])
            mimic.update({str1[i]: suf})
            suf = []
    return mimic


def print_mimic(mimic_dict, word):
    """Given mimic dict and start word, prints 200 random words."""
    for i in range(200):
        # Restart from the empty-string key whenever the chain falls off:
        # unknown word, or a word with no recorded followers.
        if word == '' or mimic_dict.get(word) == [] or mimic_dict.get(word) is None:
            word = mimic_dict.get('')
            sys.stdout.write("%s " % (word))
        else:
            word = random.choice(mimic_dict.get(word))
            sys.stdout.write("%s " % (word))
    return


# Provided main(), calls mimic_dict() and mimic()
def main():
    if len(sys.argv) != 2:
        print('usage: ./mimic.py file-to-read')
        sys.exit(1)

    # Renamed local from 'dict' so the builtin is not shadowed.
    mimic = mimic_dict(sys.argv[1])
    print_mimic(mimic, '')

if __name__ == '__main__':
    main()
[ [ 8, 0, 0.2602, 0.3469, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.449, 0.0102, 0, 0.66, 0.1429, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.4592, 0.0102, 0, 0.66,...
[ "\"\"\"Mimic pyquick exercise -- optional extra exercise.\nGoogle's Python Class\n\nRead in the file specified on the command line.\nDo a simple split() on whitespace to obtain all the words in the file.\nRather than read the file line by line, it's easier to read\nit into one giant string and split it once.", "i...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Additional basic list exercises # D. Given a list of numbers, return a list where # all adjacent == elements have been reduced to a single element, # so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or # modify the passed in list. def remove_adjacent(nums): # +++your code here+++ for x in nums: if(nums.count(x) > 1): nums.remove(x) return nums # E. Given two lists sorted in increasing order, create and return a merged # list of all the elements in sorted order. You may modify the passed in lists. # Ideally, the solution should work in "linear" time, making a single # pass of both lists. def linear_merge(list1, list2): # +++your code here+++ return sorted(list1+list2) # Note: the solution above is kind of cute, but unforunately list.pop(0) # is not constant time with the standard python list implementation, so # the above is not strictly linear time. # An alternate approach uses pop(-1) to remove the endmost elements # from each list, building a solution list which is backwards. # Then use reversed() to put the result back in the correct order. That # solution works in linear time, but is more ugly. # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Calls the above functions with interesting inputs. 
def main():
    """Exercise remove_adjacent() and linear_merge() with sample inputs."""
    print('remove_adjacent')
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])

    # A bare Python-2 'print' and print('') both emit one empty line.
    print('')

    print('linear_merge')
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])


if __name__ == '__main__':
    main()
[ [ 2, 0, 0.2535, 0.0986, 0, 0.66, 0, 855, 0, 1, 1, 0, 0, 0, 2 ], [ 6, 1, 0.2676, 0.0423, 1, 0.44, 0, 190, 2, 0, 0, 0, 0, 0, 2 ], [ 4, 2, 0.2746, 0.0282, 2, 0.82, ...
[ "def remove_adjacent(nums):\n # +++your code here+++\n \n for x in nums:\n if(nums.count(x) > 1):\n nums.remove(x)\n return nums", " for x in nums:\n if(nums.count(x) > 1):\n nums.remove(x)", " if(nums.count(x) > 1):\n nums.remove(x)", " nums.remove(x)", " return nums", ...
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

"""Wordcount exercise
Google's Python class

The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.

1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...

Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.

2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.

Use str.split() (no arguments) to split on all whitespace.

Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.

Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""

import sys
import re

# +++your code here+++


def print_words(filename):
    """Print each word and its count (tab separated), sorted by word."""
    # Open read-only; nothing is written, so the original "r+" mode and
    # manual close are replaced with a plain 'with' block (no leak on error).
    with open(filename, "r") as fo:
        str1 = fo.read()
    str1 = re.sub("[(/.,!?:-;)`]", "", str1)
    c = {}
    keyGen(str1, c)
    printing(c)


def keyGen(str1, c):
    """Fill dict *c* with word -> count for the words of string *str1*."""
    # Single O(n) pass instead of calling list.count() once per word
    # (O(n^2)); the unused 'k' local was removed.
    for word in str1.lower().split():
        c[word] = c.get(word, 0) + 1


def printing(c):
    """Print 'word \t count' lines, sorted by word as the exercise requires."""
    # The original iterated the dict unsorted; the spec asks for word order.
    for k in sorted(c):
        print('%s \t %s' % (k, c[k]))


def print_top(filename):
    """Print the 20 most common words in *filename*, most common first."""
    with open(filename, "r") as fo:
        str1 = fo.read()
    str1 = re.sub("[/.,!?;]", "", str1)
    c = {}
    keyGen(str1, c)
    b = sorted(c.items(), key=lambda item: item[1], reverse=True)
    for item in b[:20]:
        print(item[0] + ' - ' + str(item[1]))


# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.

###

# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Parse '{--count | --topcount} file' and dispatch to the printer."""
    if len(sys.argv) != 3:
        print('usage: ./wordcount.py {--count | --topcount} file')
        sys.exit(1)

    option, filename = sys.argv[1], sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print('unknown option: ' + option)
        sys.exit(1)


if __name__ == '__main__':
    main()
[ [ 8, 0, 0.2217, 0.283, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.3774, 0.0094, 0, 0.66, 0.125, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.3868, 0.0094, 0, 0.66, ...
[ "\"\"\"Wordcount exercise\nGoogle's Python class\n\nThe main() below is already defined and complete. It calls print_words()\nand print_top() functions which you write.\n\n1. For the --count flag, implement a print_words(filename) function that counts\nhow often each word appears in the text and prints:", "import...
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class

Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.

Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.

The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.

With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.

Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.

For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""

import random
import sys
import re


def mimic_dict(filename):
    """Returns mimic dict mapping each word to list of words which follow it.

    The empty-string key maps to the *first word* of the file (a bare
    string, not a list) -- print_mimic() below relies on that.
    NOTE(review): an empty file raises IndexError here; behaviour kept
    as-is, callers are expected to feed non-empty files.
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        text = f.read()
    text = re.sub("[(/.,!?:-;)`]", "", text)
    str1 = text.lower().split()
    mimic = {"": str1[0]}
    suf = []
    # O(n^2): each previously unseen word rescans the whole word list for
    # its followers.  (A dead 'j = i' pre-assignment was removed.)
    for i in range(len(str1)):
        if mimic.get(str1[i]) is None:
            for j in range(len(str1) - 1):
                if str1[i] == str1[j]:
                    suf.append(str1[j + 1])
            mimic.update({str1[i]: suf})
            suf = []
    return mimic


def print_mimic(mimic_dict, word):
    """Given mimic dict and start word, prints 200 random words."""
    for i in range(200):
        # Restart from the empty-string key whenever the chain falls off:
        # unknown word, or a word with no recorded followers.
        if word == '' or mimic_dict.get(word) == [] or mimic_dict.get(word) is None:
            word = mimic_dict.get('')
            sys.stdout.write("%s " % (word))
        else:
            word = random.choice(mimic_dict.get(word))
            sys.stdout.write("%s " % (word))
    return


# Provided main(), calls mimic_dict() and mimic()
def main():
    if len(sys.argv) != 2:
        print('usage: ./mimic.py file-to-read')
        sys.exit(1)

    # Renamed local from 'dict' so the builtin is not shadowed.
    mimic = mimic_dict(sys.argv[1])
    print_mimic(mimic, '')

if __name__ == '__main__':
    main()
[ [ 8, 0, 0.2602, 0.3469, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.449, 0.0102, 0, 0.66, 0.1429, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.4592, 0.0102, 0, 0.66,...
[ "\"\"\"Mimic pyquick exercise -- optional extra exercise.\nGoogle's Python Class\n\nRead in the file specified on the command line.\nDo a simple split() on whitespace to obtain all the words in the file.\nRather than read the file line by line, it's easier to read\nit into one giant string and split it once.", "i...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Additional basic list exercises # D. Given a list of numbers, return a list where # all adjacent == elements have been reduced to a single element, # so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or # modify the passed in list. def remove_adjacent(nums): # +++your code here+++ for x in nums: if(nums.count(x) > 1): nums.remove(x) return nums # E. Given two lists sorted in increasing order, create and return a merged # list of all the elements in sorted order. You may modify the passed in lists. # Ideally, the solution should work in "linear" time, making a single # pass of both lists. def linear_merge(list1, list2): # +++your code here+++ return sorted(list1+list2) # Note: the solution above is kind of cute, but unforunately list.pop(0) # is not constant time with the standard python list implementation, so # the above is not strictly linear time. # An alternate approach uses pop(-1) to remove the endmost elements # from each list, building a solution list which is backwards. # Then use reversed() to put the result back in the correct order. That # solution works in linear time, but is more ugly. # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Calls the above functions with interesting inputs. 
def main():
    """Exercise remove_adjacent() and linear_merge() with sample inputs."""
    print('remove_adjacent')
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])

    # A bare Python-2 'print' and print('') both emit one empty line.
    print('')

    print('linear_merge')
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])


if __name__ == '__main__':
    main()
[ [ 2, 0, 0.2535, 0.0986, 0, 0.66, 0, 855, 0, 1, 1, 0, 0, 0, 2 ], [ 6, 1, 0.2676, 0.0423, 1, 0.85, 0, 190, 2, 0, 0, 0, 0, 0, 2 ], [ 4, 2, 0.2746, 0.0282, 2, 0.79, ...
[ "def remove_adjacent(nums):\n # +++your code here+++\n \n for x in nums:\n if(nums.count(x) > 1):\n nums.remove(x)\n return nums", " for x in nums:\n if(nums.count(x) > 1):\n nums.remove(x)", " if(nums.count(x) > 1):\n nums.remove(x)", " nums.remove(x)", " return nums", ...
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

"""Wordcount exercise
Google's Python class

The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.

1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...

Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.

2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.

Use str.split() (no arguments) to split on all whitespace.

Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.

Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""

import sys
import re

# +++your code here+++


def print_words(filename):
    """Print each word and its count (tab separated), sorted by word."""
    # Open read-only; nothing is written, so the original "r+" mode and
    # manual close are replaced with a plain 'with' block (no leak on error).
    with open(filename, "r") as fo:
        str1 = fo.read()
    str1 = re.sub("[(/.,!?:-;)`]", "", str1)
    c = {}
    keyGen(str1, c)
    printing(c)


def keyGen(str1, c):
    """Fill dict *c* with word -> count for the words of string *str1*."""
    # Single O(n) pass instead of calling list.count() once per word
    # (O(n^2)); the unused 'k' local was removed.
    for word in str1.lower().split():
        c[word] = c.get(word, 0) + 1


def printing(c):
    """Print 'word \t count' lines, sorted by word as the exercise requires."""
    # The original iterated the dict unsorted; the spec asks for word order.
    for k in sorted(c):
        print('%s \t %s' % (k, c[k]))


def print_top(filename):
    """Print the 20 most common words in *filename*, most common first."""
    with open(filename, "r") as fo:
        str1 = fo.read()
    str1 = re.sub("[/.,!?;]", "", str1)
    c = {}
    keyGen(str1, c)
    b = sorted(c.items(), key=lambda item: item[1], reverse=True)
    for item in b[:20]:
        print(item[0] + ' - ' + str(item[1]))


# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.

###

# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Parse '{--count | --topcount} file' and dispatch to the printer."""
    if len(sys.argv) != 3:
        print('usage: ./wordcount.py {--count | --topcount} file')
        sys.exit(1)

    option, filename = sys.argv[1], sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print('unknown option: ' + option)
        sys.exit(1)


if __name__ == '__main__':
    main()
[ [ 8, 0, 0.2217, 0.283, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.3774, 0.0094, 0, 0.66, 0.125, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.3868, 0.0094, 0, 0.66, ...
[ "\"\"\"Wordcount exercise\nGoogle's Python class\n\nThe main() below is already defined and complete. It calls print_words()\nand print_top() functions which you write.\n\n1. For the --count flag, implement a print_words(filename) function that counts\nhow often each word appears in the text and prints:", "import...
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class

Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.

Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.

The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.

With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.

Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.

For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""

import random
import sys
import re


def mimic_dict(filename):
    """Returns mimic dict mapping each word to list of words which follow it.

    Returns the sentinel 0 (not a dict) when the file contains no words;
    print_mimic() below checks for it.  The empty-string key maps to the
    first word of the file (a bare string, not a list).
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        text = f.read()
    text = re.sub("[/.,'`;:?!-()]", "", text)
    lines = text.lower().split()
    if len(lines) == 0:
        # Empty-file sentinel, kept for backward compatibility.
        return 0
    mimic = {"": lines[0]}
    suf = []
    # O(n^2): each previously unseen word rescans the whole word list for
    # its followers.  (A dead 'j = i' pre-assignment was removed.)
    for i in range(len(lines)):
        if mimic.get(lines[i]) is None:
            for j in range(len(lines) - 1):
                if lines[i] == lines[j]:
                    suf.append(lines[j + 1])
            mimic.update({lines[i]: suf})
            suf = []
    return mimic


def print_mimic(mimic_dict, word):
    """Given mimic dict and start word, prints 200 random words."""
    if mimic_dict == 0:
        print("Sorry, your file is empty :(")
        return
    for i in range(200):
        # Restart from the empty-string key whenever the chain falls off.
        if word == '' or mimic_dict.get(word) == []:
            word = mimic_dict.get('')
            sys.stdout.write("%s " % (word))
        else:
            word = random.choice(mimic_dict.get(word))
            sys.stdout.write("%s " % (word))
    return


# Provided main(), calls mimic_dict() and mimic()
def main():
    if len(sys.argv) != 2:
        print('usage: ./mimic.py file-to-read')
        sys.exit(1)

    # Renamed local from 'dict' so the builtin is not shadowed.
    mimic = mimic_dict(sys.argv[1])
    print_mimic(mimic, '')

if __name__ == '__main__':
    main()
[ [ 8, 0, 0.2656, 0.3542, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.4583, 0.0104, 0, 0.66, 0.1429, 715, 0, 1, 0, 0, 715, 0, 0 ], [ 1, 0, 0.4688, 0.0104, 0, 0.66...
[ "\"\"\"Mimic pyquick exercise -- optional extra exercise.\nGoogle's Python Class\n\nRead in the file specified on the command line.\nDo a simple split() on whitespace to obtain all the words in the file.\nRather than read the file line by line, it's easier to read\nit into one giant string and split it once.", "i...
#!/usr/bin/python -tt # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Additional basic list exercises # D. Given a list of numbers, return a list where # all adjacent == elements have been reduced to a single element, # so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or # modify the passed in list. def remove_adjacent(nums): i=1 newlist=[] for i in range (len(nums)): if nums[i]!=nums[i-1]: newlist.append(nums[i]) return newlist # E. Given two lists sorted in increasing order, create and return a merged # list of all the elements in sorted order. You may modify the passed in lists. # Ideally, the solution should work in "linear" time, making a single # pass of both lists. def linear_merge(list1, list2): list1.extend(list2) return sorted(list1) # Note: the solution above is kind of cute, but unforunately list.pop(0) # is not constant time with the standard python list implementation, so # the above is not strictly linear time. # An alternate approach uses pop(-1) to remove the endmost elements # from each list, building a solution list which is backwards. # Then use reversed() to put the result back in the correct order. That # solution works in linear time, but is more ugly. # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # Calls the above functions with interesting inputs. 
def main():
    """Exercise remove_adjacent() and linear_merge() with sample inputs."""
    print('remove_adjacent')
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])

    # A bare Python-2 'print' and print('') both emit one empty line.
    print('')

    print('linear_merge')
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])


if __name__ == '__main__':
    main()
[ [ 2, 0, 0.2687, 0.1045, 0, 0.66, 0, 855, 0, 1, 1, 0, 0, 0, 3 ], [ 14, 1, 0.2388, 0.0149, 1, 0.62, 0, 826, 1, 0, 0, 0, 0, 1, 0 ], [ 14, 1, 0.2537, 0.0149, 1, 0.62, ...
[ "def remove_adjacent(nums):\n i=1\n newlist=[]\n for i in range (len(nums)):\n if nums[i]!=nums[i-1]:\n newlist.append(nums[i])\n return newlist", " i=1", " newlist=[]", " for i in range (len(nums)):\n if nums[i]!=nums[i-1]:\n newlist.append(nums[i])", " if nums[i]!=nums[i-1]:\n ...
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

"""Wordcount exercise
Google's Python class

The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.

1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...

Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.

2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.

Use str.split() (no arguments) to split on all whitespace.

Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.

Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""

import sys
import re


def readfile(filename):
    """Return a list of (word, count) tuples for *filename*, sorted by word.

    Fixes over the original implementation:
    * the file handle is closed via 'with' (it was leaked),
    * counting uses a dict in one O(n) pass; the original compared each
      sorted word with its predecessor and, at index 0, with the LAST
      word (lines[-1]) -- so a file whose words were all identical
      wrongly returned [],
    * no Python-2-only integer '/' in range() (the flat word/count pairing
      list is gone entirely).
    """
    with open(filename, 'r') as f:
        text = f.read()
    text = re.sub("[/.,\';:?!-]", "", text)
    counts = {}
    for word in text.lower().split():
        counts[word] = counts.get(word, 0) + 1
    # sorted() on items yields (word, count) tuples ordered by word.
    return sorted(counts.items())


def print_words(filename):
    """Print each word and its count (tab separated), sorted by word."""
    arr = readfile(filename)
    for word, count in arr:
        # Single-argument print() keeps this valid on Python 2 and 3;
        # the '%s \t %s' layout matches the original comma-separated print.
        print('%s \t %s' % (word, count))
    return


def print_top(filename):
    """Print the 20 most common words, most common first.

    The original sorted ASCENDING by count and printed every word; the
    exercise asks for the top 20 with the most common first.
    """
    arr = readfile(filename)
    newarr = sorted(arr, key=lambda item: item[1], reverse=True)
    for word, count in newarr[:20]:
        print('%s \t %s' % (word, count))
    return

# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.

###

# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Dispatch on the --count / --topcount command line flag."""
    if len(sys.argv) != 3:
        print('usage: ./wordcount.py {--count | --topcount} file')
        sys.exit(1)

    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print('unknown option: ' + option)
        sys.exit(1)

if __name__ == '__main__':
    main()
[ [ 8, 0, 0.2423, 0.3093, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.4124, 0.0103, 0, 0.66, 0.1429, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.4227, 0.0103, 0, 0.66...
[ "\"\"\"Wordcount exercise\nGoogle's Python class\n\nThe main() below is already defined and complete. It calls print_words()\nand print_top() functions which you write.\n\n1. For the --count flag, implement a print_words(filename) function that counts\nhow often each word appears in the text and prints:", "import...