
Commit

add python 3 support!
snarfed committed Jan 15, 2018
1 parent 23a45e1 commit 5f53c44
Showing 29 changed files with 707 additions and 583 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -5,7 +5,9 @@
/.eggs/
/*.egg-info
/l
+ /l3
/local/
+ /local3/
circleci_token
datastore.dat
oauth_client_secret
2 changes: 2 additions & 0 deletions README.md
@@ -33,6 +33,7 @@ Granary is a library and REST API that fetches and converts between a wide varie
Here's how to get started:

* Granary is [available on PyPi.](https://pypi.python.org/pypi/granary/) Install with `pip install granary`.
+ * Supports Python 2.7+ and 3.4+.
* [Click here for getting started docs.](#using)
* [Click here for reference docs.](https://granary.readthedocs.io/en/latest/source/granary.html)
* The REST API and demo app are deployed at [granary.io](https://granary.io/).
@@ -223,6 +224,7 @@ On the open source side, there are many related projects. [php-mf2-shim](https:/
Changelog
---
### 1.11 - unreleased
+ * Add Python 3 support!
* Twitter:
* Prefer MP4 and other video/... content types to HLS (.m3u8) etc. [Background.](https://twittercommunity.com/t/retiring-mp4-video-output/66093)
* Prefer HTTPS URLs for media images.
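The port follows the standard [python-future](https://python-future.org/) recipe: each module below adds `__future__` imports, installs aliases for renamed standard library modules, and pulls in Python 3-style builtins. A minimal sketch of that boilerplate, assuming the `future` package is installed (the `urllib.parse` usage here is illustrative):

```python
# python-future boilerplate, as added to granary/atom.py and granary/facebook.py
# below. install_aliases() must run before importing renamed stdlib modules so
# that e.g. urllib.parse resolves on Python 2 as well as Python 3.
from __future__ import absolute_import, division, unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import str  # Python 3 str semantics on Python 2

import urllib.parse  # native on Python 3, aliased by future on Python 2

print(urllib.parse.quote('python 3'))  # python%203 on both interpreters
```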
5 changes: 4 additions & 1 deletion granary/as2.py
@@ -5,6 +5,9 @@
AS1: http://activitystrea.ms/specs/json/1.0/
http://activitystrea.ms/specs/json/schema/activity-schema.html
"""
+ from __future__ import unicode_literals
+ from past.builtins import basestring
+
import copy
import logging
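Here `past.builtins.basestring` keeps string-type checks working on both interpreters, since Python 2 has `basestring` but Python 3 does not. A small sketch of the idiom (the function name is illustrative):

```python
from past.builtins import basestring  # py2: the real basestring; py3: a (str, bytes) shim

def is_stringish(val):
  # True for text and bytes on both interpreters.
  return isinstance(val, basestring)

assert is_stringish('ActivityStreams')
assert is_stringish(b'ActivityStreams')
assert not is_stringish(['ActivityStreams'])
```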

@@ -105,7 +108,7 @@ def all_from_as1(field, type=None):
obj['location'] = from_as1(loc, type='Place', context=None)

obj = util.trim_nulls(obj)
- if obj.keys() == ['url']:
+ if list(obj.keys()) == ['url']:
return obj['url']

return obj
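The `list(...)` wrapper matters because Python 3's `dict.keys()` and `dict.values()` return view objects that never compare equal to lists (the same reason `facebook.py` below wraps its `.values()` calls). A quick illustration:

```python
obj = {'url': 'https://granary.io/'}

# Python 2: obj.keys() == ['url']        ->  True
# Python 3: dict_keys(['url']) == ['url'] ->  always False
assert list(obj.keys()) == ['url']  # portable on both
```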
30 changes: 19 additions & 11 deletions granary/atom.py
@@ -2,11 +2,16 @@
Atom spec: https://tools.ietf.org/html/rfc4287 (RIP atomenabled.org)
"""
+ from __future__ import absolute_import, unicode_literals
+ from future import standard_library
+ standard_library.install_aliases()
+ from builtins import str
+
import collections
import mimetypes
import os
import re
- import urlparse
+ import urllib.parse
from xml.etree import ElementTree
import xml.sax.saxutils

@@ -16,8 +21,8 @@
import mf2util
from oauth_dropins.webutil import util

- import microformats2
- import source
+ from . import microformats2
+ from . import source

FEED_TEMPLATE = 'user_feed.atom'
ENTRY_TEMPLATE = 'entry.atom'
@@ -60,9 +65,10 @@ def _text(elem, field=None):
if ':' not in field:
field = 'atom:' + field
elem = elem.find(field, NAMESPACES)

if elem is not None and elem.text:
text = elem.text
- if not isinstance(elem.text, unicode):
+ if not isinstance(elem.text, str):
text = text.decode('utf-8')
return text.strip()
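With `from builtins import str` in scope, this `isinstance` check has text semantics on both interpreters: a Python 2 native (byte) `str` fails it and gets decoded, while on Python 3 `ElementTree` already yields text, so the decode branch is never taken. A sketch of the same pattern with an illustrative helper name:

```python
from builtins import str  # py2: future's unicode-backed str; py3: the builtin

def ensure_text(val):
  if not isinstance(val, str):  # bytes on py3, native byte str on py2
    val = val.decode('utf-8')
  return val.strip()

assert ensure_text(b' caf\xc3\xa9 ') == 'café'
assert ensure_text(' café ') == 'café'
```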

@@ -96,7 +102,9 @@ def __init__(self, **kwargs):
for k, v in kwargs.items()})

def __unicode__(self):
- return super(Defaulter, self).__unicode__() if self else u''
+ return super(Defaulter, self).__unicode__() if self else ''
+
+ __str__ = __unicode__

def __hash__(self):
return super(Defaulter, self).__hash__() if self else None.__hash__()
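The `__str__ = __unicode__` alias makes one text method serve both interpreters: Python 2's `unicode()` protocol calls `__unicode__`, while Python 3 only ever looks up `__str__`. A stripped-down version of the pattern (the class here is illustrative, not the full `Defaulter`):

```python
from __future__ import unicode_literals

class Tag(object):
  def __init__(self, name=''):
    self.name = name

  def __unicode__(self):
    return self.name if self.name else ''

  __str__ = __unicode__  # py3 calls __str__; py2 calls __unicode__

print(Tag('atom'))  # atom
```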
@@ -177,7 +185,7 @@ def atom_to_activities(atom):
Returns:
list of ActivityStreams activity dicts
"""
- assert isinstance(atom, unicode)
+ assert isinstance(atom, str)
parser = ElementTree.XMLParser(encoding='UTF-8')
feed = ElementTree.XML(atom.encode('utf-8'), parser=parser)
if _tag(feed) != 'feed':
@@ -194,7 +202,7 @@ def atom_to_activity(atom):
Returns:
dict, ActivityStreams activity
"""
- assert isinstance(atom, unicode)
+ assert isinstance(atom, str)
parser = ElementTree.XMLParser(encoding='UTF-8')
entry = ElementTree.XML(atom.encode('utf-8'), parser=parser)
if _tag(entry) != 'entry':
@@ -385,10 +393,10 @@ def _prepare_activity(a, reader=True):
if not image:
continue
url = image.get('url')
- parsed = urlparse.urlparse(url)
+ parsed = urllib.parse.urlparse(url)
scheme = parsed.scheme
netloc = parsed.netloc
- rest = urlparse.urlunparse(('', '') + parsed[2:])
+ rest = urllib.parse.urlunparse(('', '') + parsed[2:])
img_src_re = re.compile(r"""src *= *['"] *((https?:)?//%s)?%s *['"]""" %
(re.escape(netloc), re.escape(rest)))
if (url and url not in image_urls_seen and
@@ -406,6 +414,6 @@ def _prepare_activity(a, reader=True):


def _remove_query_params(url):
- parsed = list(urlparse.urlparse(url))
+ parsed = list(urllib.parse.urlparse(url))
parsed[4] = ''
- return urlparse.urlunparse(parsed)
+ return urllib.parse.urlunparse(parsed)
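Every `urlparse.*` call becomes `urllib.parse.*`, Python 3's home for those functions; with `standard_library.install_aliases()` at the top of the module, the same spelling works on Python 2 too. For instance, `_remove_query_params()` above behaves like this (the URL is illustrative):

```python
import urllib.parse  # aliased by python-future on Python 2

url = 'https://granary.io/demo?format=atom'
parsed = list(urllib.parse.urlparse(url))
parsed[4] = ''  # index 4 of the 6-tuple is the query string
print(urllib.parse.urlunparse(parsed))  # https://granary.io/demo
```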
60 changes: 31 additions & 29 deletions granary/facebook.py
@@ -10,8 +10,7 @@
Retrieving @all activities from :meth:`get_activities()` (the default) currently
returns an incomplete set of activities, ie *NOT* exactly the same set as your
Facebook News Feed: https://www.facebook.com/help/327131014036297/
"""
"""
This is complicated, and I still don't fully understand how or why they differ,
but based on lots of experimenting and searching, it sounds like the current
state is that you just can't reproduce the News Feed via Graph API's /me/home,
@@ -42,20 +41,23 @@
See the fql_stream_to_post() method below for code I used to experiment with the
FQL stream table.
"""
+ from __future__ import absolute_import, division, unicode_literals
+ from future import standard_library
+ standard_library.install_aliases()
+ from builtins import range, str, zip
+
import collections
import copy
import itertools
import json
import logging
import re
- import urllib
- import urllib2
- import urlparse
+ import urllib.error, urllib.parse, urllib.request
import mf2util

- import appengine_config
+ from . import appengine_config
from oauth_dropins.webutil import util
- import source
+ from . import source

# Since API v2.4, we need to explicitly ask for the fields we want from most API
# endpoints with ?fields=...
@@ -176,13 +178,13 @@
}
# https://developers.facebook.com/docs/graph-api/reference/post/reactions
REACTION_CONTENT = {
- 'LOVE': u'❤️',
- 'WOW': u'😮',
- 'HAHA': u'😆',
- 'SAD': u'😢',
- 'ANGRY': u'😡',
- 'THANKFUL': u'🌼', # https://github.com/snarfed/bridgy/issues/748
- 'PRIDE': u'🏳️‍🌈',
+ 'LOVE': '❤️',
+ 'WOW': '😮',
+ 'HAHA': '😆',
+ 'SAD': '😢',
+ 'ANGRY': '😡',
+ 'THANKFUL': '🌼', # https://github.com/snarfed/bridgy/issues/748
+ 'PRIDE': '🏳️‍🌈',
# nothing for LIKE (it's a like :P) or for NONE
}

@@ -305,7 +307,7 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
resp = self.urlopen(url, headers=headers, _as=None)
etag = resp.info().get('ETag')
posts = self._as(list, source.load_json(resp.read(), url))
- except urllib2.HTTPError, e:
+ except urllib.error.HTTPError as e:
if e.code == 304: # Not Modified, from a matching ETag
posts = []
else:
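`except urllib2.HTTPError, e` is Python 2-only syntax; the comma form was removed in Python 3, and `urllib2`'s exceptions moved to `urllib.error`. The `except ... as e` spelling parses on both. A minimal sketch of the pattern (the URL handling is illustrative):

```python
import urllib.error
import urllib.request

def fetch(url, headers=None):
  try:
    return urllib.request.urlopen(urllib.request.Request(url, headers=headers or {}))
  except urllib.error.HTTPError as e:  # 'as' binds the exception on py2 and py3
    if e.code == 304:  # Not Modified, e.g. from a matching ETag
      return None
    raise
```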
@@ -428,7 +430,7 @@ def _merge_photos(self, posts):
photo['privacy'] = 'custom' # ie unknown

return ([p for p in posts if not p.get('object_id')] +
- posts_by_obj_id.values() + photos)
+ list(posts_by_obj_id.values()) + photos)

def _split_id_requests(self, api_call, ids):
"""Splits an API call into multiple to stay under the MAX_IDS limit per call.
@@ -508,7 +510,7 @@ def get_comment(self, comment_id, activity_id=None, activity_author_id=None,
"""
try:
resp = self.urlopen(API_COMMENT % comment_id)
- except urllib2.HTTPError, e:
+ except urllib.error.HTTPError as e:
if e.code == 400 and '_' in comment_id:
# Facebook may want us to ask for this without the other prefixed id(s)
resp = self.urlopen(API_COMMENT % comment_id.split('_')[-1])
@@ -676,7 +678,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
'<a href="%s">%s</a>' % (
tag.get('url'), tag.get('displayName') or 'User %s' % tag['id'])
for tag in people)
- msg_data = {'message': content.encode('utf-8')}
+ msg_data = collections.OrderedDict({'message': content.encode('utf-8')})
if appengine_config.DEBUG:
msg_data['privacy'] = json.dumps({'value': 'SELF'})

@@ -699,7 +701,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
if image_url:
msg_data['attachment_url'] = image_url
resp = self.urlopen(API_PUBLISH_COMMENT % base_id,
- data=urllib.urlencode(msg_data))
+ data=urllib.parse.urlencode(msg_data))
url = self.comment_url(base_id, resp['id'],
post_author_id=base_obj.get('author', {}).get('id'))
resp.update({'url': url, 'type': 'comment'})
@@ -799,7 +801,7 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
# https://developers.facebook.com/docs/graph-api/reference/user/feed#pubfields
msg_data['tags'] = ','.join(tag['id'] for tag in people)

- resp = self.urlopen(api_call, data=urllib.urlencode(msg_data))
+ resp = self.urlopen(api_call, data=urllib.parse.urlencode(msg_data))
resp.update({'url': self.post_url(resp), 'type': 'post'})
if video_url and not resp.get('success', True):
msg = 'Video upload failed.'
@@ -845,7 +847,7 @@ def _get_person_tags(self, obj):
tag['id'] = id
people[id] = tag

- return people.values()
+ return list(people.values())

def create_notification(self, user_id, text, link):
"""Sends the authenticated user a notification.
@@ -871,7 +873,7 @@ def create_notification(self, user_id, text, link):
appengine_config.FACEBOOK_APP_SECRET),
}
url = API_BASE + API_NOTIFICATION % user_id
- resp = util.urlopen(urllib2.Request(url, data=urllib.urlencode(params)))
+ resp = util.urlopen(urllib.request.Request(url, data=urllib.parse.urlencode(params)))
logging.debug('Response: %s %s', resp.getcode(), resp.read())

def post_url(self, post):
@@ -933,8 +935,8 @@ def base_object(self, obj, verb=None, resolve_numeric_id=False):
base_obj = self.user_to_actor(self.urlopen(base_id))

try:
- parsed = urlparse.urlparse(url)
- params = urlparse.parse_qs(parsed.query)
+ parsed = urllib.parse.urlparse(url)
+ params = urllib.parse.parse_qs(parsed.query)
assert parsed.path.startswith('/')
path = parsed.path.strip('/')
path_parts = path.split('/')
@@ -984,7 +986,7 @@ def base_object(self, obj, verb=None, resolve_numeric_id=False):
# add author user id prefix. https://github.com/snarfed/bridgy/issues/229
base_obj['id'] = '%s_%s' % (author['numeric_id'], base_id)

- except BaseException, e:
+ except BaseException as e:
logging.error(
"Couldn't parse object URL %s : %s. Falling back to default logic.",
url, e)
@@ -1721,7 +1723,7 @@ def urlopen(self, url, _as=dict, **kwargs):
log_url = url
if self.access_token:
url = util.add_query_params(url, [('access_token', self.access_token)])
- resp = util.urlopen(urllib2.Request(url, **kwargs))
+ resp = util.urlopen(urllib.request.Request(url, **kwargs))

if _as is None:
return resp
@@ -1781,8 +1783,8 @@ def urlopen_batch(self, urls):
for url, resp in zip(urls, resps):
code = int(resp.get('code', 0))
body = resp.get('body')
- if code / 100 in (4, 5):
- raise urllib2.HTTPError(url, code, body, resp.get('headers'), None)
+ if code // 100 in (4, 5):
+ raise urllib.error.HTTPError(url, code, body, resp.get('headers'), None)
bodies.append(body)

return bodies
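With `from __future__ import division` in effect (added at the top of this module), `/` is true division even on Python 2, so `code / 100` would produce a float like `4.04` that never matches `(4, 5)`; `//` keeps the status-class check on integers. For example:

```python
from __future__ import division

code = 404
assert code / 100 == 4.04      # true division: a float, wrong for class checks
assert code // 100 in (4, 5)   # floor division: 4 for 4xx, 5 for 5xx
```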
@@ -1825,7 +1827,7 @@ def urlopen_batch_full(self, requests):
req['headers'] = [{'name': n, 'value': v}
for n, v in req['headers'].items()]

- data = 'batch=' + json.dumps(util.trim_nulls(requests),
+ data = 'batch=' + json.dumps(util.trim_nulls(requests), sort_keys=True,
separators=(',', ':')) # no whitespace
resps = self.urlopen('', data=data, _as=list)

