Commit 311b008d authored by Sacha, committed by SXibolet@2PITAU

training process now just fetches social content; no scoring or sentiment

parent b9ae57fb
Copyright (c) 2015 by Ismini Lourentzou, Graham Dyer, Abhishek Sharma, ChengXiang Zhai
Copyright (c) 2015-2016 by Ismini Lourentzou, Graham Dyer, Abhishek Sharma, ChengXiang Zhai
Some rights reserved. Please see https://creativecommons.org/licenses/by-nc-sa/4.0/
@@ -7,7 +7,7 @@
Runs server.
:copyright: (c) 2015 I Lourentzou, G Dyer, A Sharma, CX Zhai. Some rights reserved.
:copyright: (c) 2015-2016 I Lourentzou, G Dyer, A Sharma, CX Zhai. Some rights reserved.
:license: CC BY-NC-SA 4.0, see LICENSE for more details.
"""
from flask import Flask, session, redirect, render_template, request, Blueprint, flash, abort
@@ -17,6 +17,7 @@ import urllib, urllib2, cookielib
from sentiment import textblob, is_positive, is_negative, sentistrength
from functools import partial
from operator import is_not
from nltk.corpus import stopwords
MAX_ATTEMPTS = 6
@@ -32,9 +33,12 @@ class SocialContent(object):
def __init__(self, clean, dirty, training=False):
self.clean = clean
self.dirty = dirty
self.sentiment = self._sentiment(training)
self.is_negative = is_negative(self.sentiment)
self.is_positive = is_positive(self.sentiment)
if not training:
# right now, sentiment is not computed for training data
# the plumbing exists, though: SentiStrength would run during training if this conditional were removed
self.sentiment = self._sentiment(training)
self.is_negative = is_negative(self.sentiment)
self.is_positive = is_positive(self.sentiment)
def to_dict(self):
return self.__dict__
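A minimal sketch of the new behavior (class names as above, payloads hypothetical): with training=True the sentiment fields are never set, so to_dict() carries only the text.

    sc = SocialContent('great read', '<p>great read</p>', training=True)
    assert not hasattr(sc, 'sentiment')    # sentiment skipped during training
    sc = SocialContent('great read', '<p>great read</p>')
    print(sc.is_positive)                  # non-training path still scores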
@@ -75,7 +79,7 @@ class Comment(SocialContent):
holds basic attributes and finds sentiment
"""
def __init__(self, j, trainging=False):
def __init__(self, j, training=False):
dirty = j['commentBody']
SocialContent.__init__(self, self._clean(dirty), dirty, training)
@@ -105,14 +109,24 @@ class Article(object):
self.xlarge = 'https://www.nytimes.com/%s' % j['multimedia'][1]['url'] if len(j['multimedia']) > 1 else None
self.published = j['pub_date'][:10]
self.full = self._full_text(training)
self.training = training
if training:
self.title_tweets = twitter_search(self.title, training=training)
sw = set(stopwords.words('english'))
mod_tit = ' '.join(filter(lambda x: x not in sw,
self.title.split()))
self.queried_title = mod_tit
self.title_tweets = twitter_search(mod_tit, training=training)
self.comments = article_comments(self.url, training=training)
# notice that this doesn't include tweets
# notice that this count doesn't include tweets
self.n_comments = len(self.comments)
def to_dict(self):
return self.__dict__ if (self.full is not None and len(self.full)) != 0 else None
if self.full is not None and len(self.full) != 0:
if self.training:
self.title_tweets = map(lambda x: x.to_dict(), self.title_tweets)
return self.__dict__
else:
return None
def _no_html_ab(self):
return BeautifulSoup(self.abstract or self.lead, 'html.parser').getText()
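The title is stripped of English stopwords before it is used as a Twitter query. A standalone sketch of the same filtering (assumes the NLTK stopword corpus has been downloaded):

    from nltk.corpus import stopwords  # run nltk.download('stopwords') first

    sw = set(stopwords.words('english'))
    title = 'The Senate Votes on the Budget'   # hypothetical headline
    query = ' '.join(w for w in title.split() if w not in sw)
    print(query)  # 'The Senate Votes Budget' -- note: matching is case-sensitive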
@@ -93,7 +93,9 @@ def index():
@mturk.route('/mark_available')
@require_human
def mark_available():
toggle_being_read(get_collection(), session['reading_url'], False)
toggle_being_read(get_articles_collection(),
session['reading_url'],
False)
return success()
@@ -111,7 +113,7 @@ def update_doc():
if n_inds > session['n_sentences'] or n_inds < 1:
raise UsageError('no or too many highlights')
col = get_collection()
col = get_articles_collection()
url = session['reading_url']
increment_reads(col, url, inds)
toggle_being_read(col, url, False)
@@ -126,10 +128,17 @@ def submitted():
return render_template('mturk_submitted.html', **locals())
def get_collection():
def get_db():
client = MongoClient('localhost', MONGO_PORT)
db = client.controversy
return db.training
return client.controversy
def get_articles_collection():
return get_db().articles
def get_tweets_collection():
return get_db().tweets
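A quick sketch of how the split helpers are used (assumes a local mongod listening on MONGO_PORT):

    articles = get_articles_collection()   # db 'controversy', collection 'articles'
    tweets = get_tweets_collection()       # db 'controversy', collection 'tweets'
    print(articles.name)                   # -> 'articles'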
def increment_reads(col, url, highlights):
@@ -159,11 +168,26 @@ def toggle_being_read(col, url, dest):
def new_doc(doc):
"""given an API response, make an entry for each article,
preserving the timestamp of the entire response and keyword.
Cache tweets by keyword (kept separate from the article corpus for speed).
Returns number of articles found.
"""
col = get_collection()
if doc['ok'] != 1:
return 0
ar_col = get_articles_collection()
tw_col = get_tweets_collection()
res = doc['result']
tw_col.insert_one({
'tweets': res['kw_tweets'],
'ts': doc['ts'],
'keyword': doc['keyword']
})
n_docs = 0
for article in doc['result']:
for article in res['articles']:
if article is None:
continue
n_docs += 1
a = {
'ts': doc['ts'],
@@ -173,7 +197,7 @@
'highlights': []
}
a.update(article)
col.insert_one(a).inserted_id
ar_col.insert_one(a).inserted_id
return n_docs
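Because tweets are now cached per keyword in their own collection, training data can be re-read without touching the article corpus; a hedged sketch with a hypothetical keyword:

    cached = get_tweets_collection().find_one({'keyword': 'budget'})
    if cached is not None:
        print(len(cached['tweets']))   # tweets captured at time cached['ts']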
@@ -30,13 +30,13 @@ if __name__ == '__main__':
os.listdir(TT_DIR)))
terms = list(itertools.chain(*terms_by_file))
if len(sys.argv) > 1:
if sys.argv[1] == 'test':
print('using small subset of training terms')
terms = list(set(terms))[:1]
elif sys.argv[1] in {'--help', 'help'}:
print('''options:\n\t``test`` <-- uses small subset of training terms \n\t\t\tfor testing mturk\n\t```` <-- (no options) loads all training keywords\n\t``help`` or ``--help`` <-- displays this message\n''')
sys.exit(0)
if len(sys.argv) > 1 and sys.argv[1] == 'test':
n_terms = int(sys.argv[2]) if len(sys.argv) == 3 else 2
terms = list(set(terms))[:n_terms]
print('using %s training terms' % len(terms))
elif len(sys.argv) > 1 and sys.argv[1] in {'--help', 'help'}:
print('''options:\n\t``test`` <-- uses small subset of training terms \n\t\t\tfor testing mturk\n\t```` <-- (no options) loads all training keywords\n\t``help`` or ``--help`` <-- displays this message\n''')
sys.exit(0)
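Usage then looks like this (the script name is hypothetical):

    $ python training.py test 3    # small run: 3 deduplicated training terms
    $ python training.py --help    # print the options message
    $ python training.py           # load every training keyword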
n_tasks = len(terms)
n_done = 0
@@ -44,13 +44,13 @@ if __name__ == '__main__':
for term in terms:
update_progress(n_done, n_tasks)
try:
scored_keyword = querier.perform(term, training=True)
social = querier.perform(term, training=True)
except UsageError:
# no articles
continue
n_done += 1
n_docs += mturk.new_doc(scored_keyword)
n_docs += mturk.new_doc(social)
update_progress(n_done, n_tasks)
print('\t... done')
# -*- coding: utf-8 -*-
from config import REDIS_HOST, REDIS_PORT
from scoring import controversy
from content import article_search, twitter_search, article_comments
from content import article_search, twitter_search
import db
import redis
import json
@@ -16,8 +16,20 @@ def perform(keyword, training=False):
if len(articles) == 0:
raise UsageError('no-articles', status_code=200)
if training:
res = {
'articles': map(lambda x: x.to_dict(),
articles),
'kw_tweets': map(lambda x: x.to_dict(),
twitter_search(keyword, training=training))
}
else:
res = controversy(articles,
twitter_search(keyword, training=training),
_filter=not training)
return {
'result': controversy(articles, twitter_search(keyword, training=training), _filter=~training),
'training': training,
'result': res,
'ts': datetime.datetime.utcnow(),
'keyword': keyword,
'ok': 1
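So in training mode the payload is raw social content, while the normal path still returns controversy-scored articles. A sketch of the two shapes, with field names taken from the code above and a hypothetical keyword:

    res = perform('budget', training=True)
    res['result']['articles']    # unscored article dicts
    res['result']['kw_tweets']   # tweet dicts for the keyword itself
    res = perform('budget')
    res['result']                # scored output of controversy(...)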
@@ -32,7 +44,7 @@ def new_query(keyword):
ranked['ts'] = ranked['ts'].isoformat()
ranked_dump = json.dumps(ranked)
sr.set(keyword, ranked_dump)
# expire cache in 60 * 60 * 24 = 86400 seconds (24 hours)
# expire cache in 60 * 60 * 24 = 86400 seconds = 24 hours
sr.expire(keyword, 86400)
keyword_score = sum(a['score'] for a in ranked['result'])
db.append_queries(keyword, keyword_score)
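A small sketch of the cache round trip (assuming sr is the module's Redis client, as above):

    cached = sr.get(keyword)      # None once the 24-hour TTL has expired
    if cached is not None:
        ranked = json.loads(cached)
    print(sr.ttl(keyword))        # seconds remaining, at most 86400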
@@ -165,6 +165,8 @@ def controversy(articles, social_content, _filter=True):
ranked_articles[article_index] = dict_art
sentiment_score, linguistic_score = 0, 0 # (entropy) score of the entire article
sentences = [] # scores & metadata for each sentence
# ``|*^*|`` is added by ``content.py`` to mark new paragraphs
# ... only in training
query = tokenizer.tokenize(articles[article_index].full.replace('|*^*|', ''))
# for every sentence in the article
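For illustration, stripping the marker before sentence tokenization (assumes tokenizer is an NLTK Punkt-style sentence tokenizer):

    full = 'First paragraph. |*^*| Second paragraph.'   # hypothetical training text
    sentences = tokenizer.tokenize(full.replace('|*^*|', ''))
    # -> ['First paragraph.', 'Second paragraph.']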