Commit a37588f5 authored by Sacha, committed by SXibolet@2PITAU

essentials complete for mturk training

parent 77f63646
*.html linguist-vendored
......@@ -4,6 +4,7 @@ venv
*.sass-cache
*.css
*.css.map
*.swp
.DS_Store
controversy/sentistrength
notes.rst
......@@ -11,21 +11,23 @@
:copyright: (c) 2015 I Lourentzou, G Dyer, A Sharma, C Zhai. Some rights reserved.
:license: CC BY-NC-SA 4.0, see LICENSE for more details.
"""
from flask import Flask, session, redirect, render_template, request, Blueprint, flash, abort, url_for
from flask import Flask, session, redirect, render_template, request, Blueprint, flash, abort
from jinja2 import TemplateNotFound
from functools import wraps
from api import api
from mturk import mturk
from config import *
from datetime import datetime
from hashlib import md5
from dateutil.relativedelta import relativedelta
import time
import db
import forms
from digest import digest
application = Flask(__name__)
application.register_blueprint(api, url_prefix='/api')
application.register_blueprint(mturk, url_prefix='/training')
application.secret_key = SECRET_KEY
application.config['RECAPTCHA_PUBLIC_KEY'] = CAPTCHA_PUBLIC
application.config['RECAPTCHA_PRIVATE_KEY'] = CAPTCHA_PRIVATE
......@@ -33,11 +35,6 @@ application.config['testing'] = DEBUG
application.config['version'] = 'v0.3'
def digest(static):
with open('static/%s' % static) as f:
return "%s?v=%s" % (url_for('static', filename=static), md5(f.read()).hexdigest())
def get_added_styles():
webkit = digest('webkit.css') if session.get('webkit') == 'webkit' else None
safari = digest('safari.css') if session.get('safari') == 'safari' else None
......
......@@ -3,6 +3,7 @@ DB_PORT = 3306
DB_NAME = 'controversy'
DB_PASSWORD = 'pwd' # probably REQUIRED change
DB_USER = 'root' # maybe REQUIRED change
MONGO_PORT = 27017
SECRET_KEY = '23jlvafi23josdnnaDFASs' # should change if deployed publicly
API_KEY = 'twitter api' # REQUIRED change
API_SECRET = 'twitter api secret' # REQUIRED change
......
......@@ -19,36 +19,71 @@ from functools import partial
from operator import is_not
MAX_ATTEMPTS = 10
MAX_ATTEMPTS = 6
MAX_COMMENTARY = 500
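# caps on paged retrieval of social content: fetch attempts per source and total comments kept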
TAG_RE = re.compile(r'<[^>]+>')
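# TAG_RE matches HTML tags; Comment._clean strips them from NYT comment bodies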
class Tweet(object):
class SocialContent(object):
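"""Base class for tweets and NYT comments: raw and cleaned text plus a sentiment score."""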
def __init__(self, clean, dirty):
self.clean = clean
self.dirty = dirty
self.sentiment = self._sentiment()
self.is_negative = is_negative(self.sentiment)
self.is_positive = is_positive(self.sentiment)
def to_dict(self):
return self.__dict__
def _sentiment(self):
return textblob(self.clean)
class Tweet(SocialContent):
"""A tweet
holds basic attributes and finds sentiment
"""
def __init__(self, j):
self.tweet = j['text']
self.clean_tweet = self._clean()
dirty = j['text']
SocialContent.__init__(self, self._clean(dirty), dirty)
self.ts = j['created_at']
self.retweets = j['retweet_count']
self.retweeted = j['retweeted']
self.location = j['user']['location']
self.author = j['user']['screen_name']
self.n_statuses = j['user']['statuses_count']
self.time_zone = j['user']['time_zone']
self.followers = j['user']['followers_count']
self.pimg = j['user']['profile_image_url']
self.identifier = j['id']
self.sentiment = self._sentiment()
self.is_negative = is_negative(self.sentiment)
self.is_positive = is_positive(self.sentiment)
def to_dict(self):
return self.__dict__
def _clean(self):
return ' '.join(re.sub(r"(?:\@|https?\://)\S+", "", self.tweet.strip('#')).split())
def _clean(self, dirty):
return ' '.join(re.sub(r"(?:\@|https?\://)\S+", "", dirty.strip('#')).split())
def _sentiment(self):
return textblob(self.clean_tweet)
class Comment(SocialContent):
"""A comment
holds basic attributes and finds sentiment
"""
def __init__(self, j):
dirty = j['commentBody']
SocialContent.__init__(self, self._clean(dirty), dirty)
self.userLocation = j['userLocation']
self.n_replies = j['replyCount']
self.ts = j['updateDate']
self.n_recommendations = j['recommendations']
self.abuseFlag = j['reportAbuseFlag']
def _clean(self, dirty):
return TAG_RE.sub('', dirty)
class Article(object):
"""A NYT article
......@@ -66,6 +101,7 @@ class Article(object):
self.xlarge = 'https://www.nytimes.com/%s' % j['multimedia'][1]['url'] if len(j['multimedia']) > 1 else None
self.published = j['pub_date'][:10]
self.full = self._full_text()
self.comments = article_comments(self.url)
def to_dict(self):
return self.__dict__ if (self.full is not None and len(self.full)) != 0 else None
......@@ -88,13 +124,6 @@ class Article(object):
jar.clear()
return res
def _comments(self):
params = urllib.urlencode({
'api-key' : NYT_COMMUNITY_KEY,
'url' : self.url
})
response = urllib2.urlopen("http://api.nytimes.com/svc/community/v3/user-content/url.json?%s" % params)
def nyt_query_date(s):
return s.strftime('%Y%m%d')
......@@ -107,19 +136,44 @@ def article_search(keyword):
last_week = today - datetime.timedelta(days=11)
params = urllib.urlencode({
'q' : keyword,
'begin_date' : nyt_query_date(last_week),
'end_date' : nyt_query_date(today),
'api-key' : NYT_KEY,
'facet_field' : 'source'
'q': keyword,
'begin_date': nyt_query_date(last_week),
'end_date': nyt_query_date(today),
'api-key': NYT_KEY,
'facet_field': 'source'
})
response = urllib2.urlopen("http://api.nytimes.com/svc/search/v2/articlesearch.json?%s" % params)
response = urllib2.urlopen('http://api.nytimes.com/svc/search/v2/articlesearch.json?%s' % params)
# an Article will be None if it doesn't have body text (thus the partial)
# return a list of Article objects that have body text
return filter(partial(is_not, None), map(lambda x: Article(x), json.loads(response.read())['response']['docs']))
def article_comments(url, offset=0):
comments = []
for i in xrange(MAX_ATTEMPTS):
if MAX_COMMENTARY < len(comments):
break
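# the Community API appears to return comments in pages of 25, hence the offset step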
params = urllib.urlencode({
'url': url,
'api-key': NYT_COMMUNITY_KEY,
'offset': i * 25
})
response = urllib2.urlopen('http://api.nytimes.com/svc/community/v3/user-content/url.json?%s' % params)
try:
comment_batch = json.loads(response.read())['results']['comments']
except ValueError:
# no json could be decoded
break
comments += map(lambda x: Comment(x).to_dict(), comment_batch)
return comments
def twitter_search(keyword):
twitter = get_auth()
tweets = []
......@@ -136,7 +190,6 @@ def twitter_search(keyword):
except KeyError:
break
print('found ==> ' + str(len(tweets)) + ' tweets')
return tweets
......
#!/bin/bash
# setup NGINX on 14.04 LTS to run Controversy
# run as root
cd ~
adduser deployer
adduser deployer sudo
sudo apt-get update
sudo apt-get install -y git
sudo apt-get install -y gunicorn supervisor nginx python python-pip python-virtualenv python-numpy python-scipy python-matplotlib ipython ipython-notebook python-pandas python-sympy python-nose redis-server mysql-server
mkdir /home/www
cd /home/www
git clone https://github.com/gdyer/controversy
cd controversy/server
virtualenv --system-site-packages venv
. venv/bin/activate
sudo pip install -r requirements.txt
echo "import nltk
nltk.download('all')" > get-nltk.py
python get-nltk.py
rm get-nltk.py
mv sample-config.py config.py
sudo /etc/init.d/nginx start
rm /etc/nginx/sites-enabled/default
touch /etc/nginx/sites-available/controversy
ln -s /etc/nginx/sites-available/controversy /etc/nginx/sites-enabled/controversy
mv controversy /etc/nginx/sites-enabled/
sudo /etc/init.d/nginx restart
echo "[program:controversy]
command=gunicorn app:application -b localhost:8000 --log-file /var/log/gunicorn/log.log
directory=/home/www/controversy/server
user=root
stderr_logfile=/var/log/supervisor/error.log
stdout_logfile=/var/log/supervisor/out.log" > /etc/supervisor/conf.d/controversy.conf
supervisorctl reread
supervisorctl update
supervisorctl start controversy
echo "\tEnter your MySQL password, then: CREATE DATABASE controversy; USE controversy; SOURCE schema.sql;
\n\nAfterwards, be sure to edit server/config.py with correct credentials"
mysql -p
# -*- coding: utf-8 -*-
from flask import url_for
from hashlib import md5
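# append an md5 hash of the file's contents to static URLs so browsers fetch new builds (cache busting)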
def digest(static):
with open('static/%s' % static) as f:
return "%s?v=%s" % (url_for('static', filename=static), md5(f.read()).hexdigest())
Please use Python 2.7.x
.. |...| unicode:: U+2026 .. ldots
This is not for the faint-hearted; we really do recommend you use the web client.
If you're on a fresh machine or don't use git, Python, or MySQL much, read the two paragraphs below before proceeding.
Please use Python 2.7.x
#. ``$ git clone https://github.com/gdyer/controversy``
#. ``$ cd controversy/controversy``
#. get an academic SentiStrength license by sending an email to `the address listed here`_, move the ``jar`` and data into ``sentistrength`` with names ``SentiStrengthCom.jar`` and ``data-11`` respectively. If you like we could ask if we can allow you to use our license. You'll probably need to be in academia either way.
#. ``$ mv config.py.default config.py``, and change credentials where marked as ``REQUIRED``. You'll need to register for NYTimes' `Article Search API`_ and `Twitter's API`_.
#. get an academic SentiStrength license by sending an email to `the address listed here`_, move the ``jar`` and data into ``sentistrength`` with names ``SentiStrengthCom.jar`` and ``data-11`` respectively. You'll need to be in academia to get a free license.
#. ``$ mv config.py.default config.py``, and change credentials where marked as ``REQUIRED``. You'll need to register for NYTimes' `Article Search`_ and `Community`_ APIs and `Twitter's API`_.
#. create a MySQL database called "controversy" and load the schema with ``source schema.sql`` (see the example after this list).
#. install MongoDB and start the server on the default port. Exit with ``<Ctrl>+C`` only if you're on Debian/Ubuntu; otherwise, keep ``sudo mongod`` running and continue in a new shell. NoSQL is a good fit for how we do training. We'll soon provide a simple way of downloading our data, which, once downloaded, can be loaded into MongoDB with a script we'll also release shortly |...|
#. install redis and run ``redis-server``. Exit with ``<Ctrl>+C`` only if you're on Debian/Ubuntu; otherwise, keep ``redis-server`` running and continue in a new shell.
#. satisfy SciPy `dependencies`_
#. ``$ pip install virtualenv``
......@@ -20,13 +23,9 @@ If you're on a fresh machine or don't use git, Python, or MySQL much, read the t
#. ``$ python app.py``
#. navigate to ``localhost:4040`` in your browser. See `API spec`_ for routes.
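The MySQL step above can be done from the ``mysql`` shell; a minimal example, assuming a local server and a ``root`` login (these are the same statements the setup script prints)::

    $ mysql -u root -p
    mysql> CREATE DATABASE controversy;
    mysql> USE controversy;
    mysql> SOURCE schema.sql;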
These steps should work out of the box on Debian-based machines. There, MySQL is ``mysql-server``, redis is ``redis-server``, and the SciPy packages are ``python-numpy python-scipy python-matplotlib ipython ipython-notebook python-pandas python-sympy python-nose``.
Apple tightened filesystem permissions in El Capitan. We recommend the ``brew`` package manager to work around them. Install it with ``$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"`` and make sure the Xcode CLI tools are present (install Xcode from the Mac App Store, then run ``$ xcode-select --install``). ``brew`` packages of interest include ``redis`` and ``mysql`` (then run ``$ mysql.server start``). You'll also need ``pip`` for the Python dependencies: ``sudo easy_install pip``. El Capitan users may need to reinstall Python to get around strange "permission denied" errors: ``brew reinstall python``. You'll then need to install SciPy (plus matplotlib and numpy).
.. _API spec: controversy/README.rst
.. _API spec: http://ocha.2pitau.org/controversy-docs
.. _dependencies: http://www.scipy.org/install.html
.. _Article Search API: http://developer.nytimes.com/docs/read/article_search_api_v2
.. _Article Search: http://developer.nytimes.com/apps/mykeys
.. _Community: http://developer.nytimes.com/apps/mykeys
.. _Twitter's API: https://apps.twitter.com/
.. _The address listed here: http://sentistrength.wlv.ac.uk
.. _the address listed here: http://sentistrength.wlv.ac.uk
......@@ -2,9 +2,10 @@
"""
forms.py
~~~~~~~~
Forms referenced in app.
"""
from flask_wtf import Form#, RecaptchaField
from flask_wtf import Form, RecaptchaField
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email
import db
......@@ -66,3 +67,12 @@ class Login(Form):
self.username = username
db.logged_in(username)
return True
class BeginHIT(Form):
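"""Entry form for a HIT: a single reCAPTCHA confirming the worker is human."""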
recaptcha = RecaptchaField()
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
def validate(self):
return Form.validate(self)
# -*- coding: utf-8 -*-
from flask import Blueprint, jsonify, request, session, render_template
from digest import digest
from pymongo import MongoClient
from config import MONGO_PORT
import content
import datetime
import forms
from error import UsageError
import nltk.data
mturk = Blueprint('/training', __name__)
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
@mturk.errorhandler(UsageError)
def handle_error(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@mturk.route('/', methods=['GET', 'POST'])
def index():
css = digest('mturk/highlight.css')
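# first-time visitors must pass the reCAPTCHA on the welcome page before getting an article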
if session.get('human') != 'yes':
form = forms.BeginHIT()
if form.validate_on_submit():
session['human'] = 'yes'
else:
return render_template('mturk_welcome.html', **locals())
article = get_next_doc()
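# split the article body into sentences for sentence-level highlighting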
article['full'] = sent_detector.tokenize(article['full'].strip())
session['started_reading'] = datetime.datetime.utcnow()
return render_template('mturk_highlight.html',
css=css,
article=article)
@mturk.route('/submit', methods=['GET', 'POST'])
def update_doc(data, url):
if 'sis' not in request.args:
raise UsageError('missing argument!')
col = get_collection()
increment_reads(col, url)
toggle_being_read(col, url, False)
def get_collection():
client = MongoClient('localhost', MONGO_PORT)
db = client.controversy
return db.training
def increment_reads(col, url):
return col.update_one({
'url': url
}, {
'$inc': {
'n_reads': 1
}
}).modified_count
def toggle_being_read(col, url, dest):
return col.update_one({
'url': url
}, {
'$set': {
'being_read': dest
}
})
def new_doc(doc):
"""given an API response, make an entry for each article,
preserving the timestamp of the entire response and keyword.
"""
col = get_collection()
n_docs = 0
for article in doc['result']:
n_docs += 1
a = {
'ts': doc['ts'],
'keyword': doc['keyword'],
'n_reads': 0,
'being_read': False
}
a.update(article)
col.insert_one(a).inserted_id
return n_docs
def get_next_doc():
col = get_collection()
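# candidates: articles read fewer than 5 times, preferring those not currently being read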
poss = col.find({
'n_reads' : {
'$lt': 5
}
}).sort([
('being_read', 1)
])
to_be_read = poss[:][0]
toggle_being_read(col, to_be_read['url'], True)
return to_be_read
# -*- coding: utf-8 -*-
import mturk
import querier
import os
import itertools
from error import UsageError
import sys
TT_DIR = 'training_terms'
def get_file(name):
with open(name, 'r') as f:
data = f.readlines()
return data
def update_progress(n_done, n_tasks):
pprogress = int(100 * (float(n_done) / n_tasks))
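# one '#' per 5 percent, i.e. a 20-segment progress bar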
sys.stdout.write('\r working ... [ %s ] %s%%'
% ('#' * (pprogress / 5), pprogress))
sys.stdout.flush()
if __name__ == '__main__':
terms_by_file = map(lambda x: get_file('%s/%s' % (TT_DIR, x)),
filter(lambda x: x.endswith('.txt'),
os.listdir(TT_DIR)))
terms = list(itertools.chain(*terms_by_file))
if len(sys.argv) > 1:
if sys.argv[1] == 'test':
terms = list(set(terms))[:5]
elif sys.argv[1] in {'--help', 'help'}:
print('''options:\n\t``test`` <== uses small subset of training terms \n\t\t\tfor testing mturk\n\t```` <== (no options) loads all training keywords\n\t``help`` or ``--help`` <== displays this message\n''')
sys.exit(0)
n_tasks = len(terms)
n_done = 0
n_docs = 0
for term in terms:
update_progress(n_done, n_tasks)
try:
scored_keyword = querier.perform(term)
except UsageError:
# no articles
continue
n_done += 1
n_docs += mturk.new_doc(scored_keyword)
update_progress(n_done, n_tasks)
print('\t... done')
print('\t ... summary: %s doc(s) ready for training' % n_docs)
# -*- coding: utf-8 -*-
from config import REDIS_HOST, REDIS_PORT
from scoring import controversy
from content import article_search, twitter_search
from content import article_search, twitter_search, article_comments
import db
import redis
import json
import datetime
sr = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
def new_query(keyword):
"""Provide ``keyword`` for content retrieval,
scoring, cache, and history entry.
def perform(keyword):
"""Provide ``keyword`` for content retrieval, scoring.
"""
articles = article_search(keyword)
if len(articles) == 0:
raise UsageError('no-articles', status_code=200)
scored = controversy(articles, twitter_search(keyword))
ranked = {
'result': scored,
return {
'result': controversy(articles, twitter_search(keyword)),
'ts': datetime.datetime.utcnow(),
'keyword': keyword,
'ok': 1
}
def new_query(keyword):
"""Provide ``keyword`` for content retrieval,
scoring, cache, and history entry.
"""
ranked = perform(keyword)
ranked['ts'] = ranked['ts'].isoformat()
ranked_dump = json.dumps(ranked)
sr.set(keyword, ranked_dump)
# expire cache in 60 * 60 * 24 = 86400 seconds (24 hours)
sr.expire(keyword, 86400)
keyword_score = sum(a['score'] for a in scored)
keyword_score = sum(a['score'] for a in ranked['result'])
db.append_queries(keyword, keyword_score)
return ranked_dump
......@@ -13,3 +13,4 @@ nltk
gensim
scipy
matplotlib
pymongo
......@@ -44,10 +44,10 @@ class BM25:
def build_dictionary(self):
proc_data = []
for i in xrange(len(self.fn_docs)):
tweet = self.fn_docs[i].clean_tweet
text = self.fn_docs[i].clean
# sentiment = self.fn_docs[i]["sentiment"]
# self.raw_data.append(self.fn_docs[i])
proc_data.append(preprocess(tweet))
proc_data.append(preprocess(text))
self.dictionary.add_documents(proc_data)
......@@ -55,8 +55,8 @@ class BM25:
docTotalLen = 0.0
# print (self.dictionary.token2id)
for i in xrange(len(self.fn_docs)):
tweet = self.fn_docs[i].clean_tweet
doc = preprocess(tweet)
text = self.fn_docs[i].clean
doc = preprocess(text)
docTotalLen += len(doc)
self.DocLen.append(len(doc))
# print self.dictionary.doc2bow(doc)
......@@ -155,8 +155,8 @@ def sentiment_conditional(l):
return res
def controversy(articles, tweets):
bm25 = BM25(tweets, delimiter=' ')
def controversy(articles, social_content):
bm25 = BM25(social_content, delimiter=' ')
tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
article_count = len(articles)
ranked_articles = [{}] * article_count
......@@ -177,15 +177,15 @@ def controversy(articles, tweets):
scores = bm25.bm25_score(preprocess(sentence))
sentiments, extremes, caps, relevant_tweets = [], [], [], []
# for every tweet relevant to the sentence
# for every social content relevant to the sentence ...
for tweet in scores:
# get the index of the relevant tweet
doc_index = tweet[1]
relevant_tweet = tweets[doc_index]
relevant_tweet = social_content[doc_index]
sentiment = relevant_tweet.sentiment
sentiments.append(sentiment)
words = relevant_tweet.clean_tweet.split(' ')
words = relevant_tweet.clean.split(' ')
extreme_words_count = sum(map(lambda x: int(is_extreme(x)), words))
extremes.append(X_s(extreme_words_count > 0, sentiment))
......