Commit fd641fe8 authored by Sacha, committed by SXibolet@2PITAU

working well

parent 723c9800
@@ -21,7 +21,7 @@ from sentiment import textblob, is_positive, is_negative, sentistrength
 from functools import partial
 from operator import is_not
 from nltk.corpus import stopwords
-from twython.exceptions import TwythonRateLimitError
+from twython.exceptions import TwythonRateLimitError, TwythonError
 MAX_ATTEMPTS = 6
@@ -240,13 +240,13 @@ def twitter_search(keyword, training=False):
         try:
             response = twitter.search(**kwargs)
-        except TwythonRateLimitError:
-            # exceeded rate limits
+        except:
+            # bad form, but a variety of errors could be thrown
+            # from exceeded rate limits
             curr_comb += 1
+            print(curr_comb)
             twitter = get_auth(curr_comb)
             i -= 1
-            break
+            continue
         tweets += map(lambda x: Tweet(x, training=training), response['statuses'])
...
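The bare except above is flagged as bad form in the new comment; since the import line now brings in TwythonError as well, a narrower handler could catch just the two Twython exception classes while still rotating credential pairs. A minimal sketch, assuming get_auth and MAX_ATTEMPTS behave as in this repository; search_with_rotation is a hypothetical helper, not part of the commit:

    # Sketch only: narrows the bare except to the two Twython exceptions imported above.
    # get_auth and MAX_ATTEMPTS come from this repository; the rest is illustrative.
    from twython.exceptions import TwythonRateLimitError, TwythonError

    def search_with_rotation(twitter, curr_comb, **kwargs):
        for _ in range(MAX_ATTEMPTS):
            try:
                # same call as in twitter_search above
                return twitter.search(**kwargs), twitter, curr_comb
            except (TwythonRateLimitError, TwythonError):
                # rate limit hit (or another API error): move to the next key pair
                curr_comb += 1
                twitter = get_auth(curr_comb)
        return None, twitter, curr_comb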
@@ -202,7 +202,7 @@ def new_doc(doc):
 def get_next_doc():
-    col = get_collection()
+    col = get_articles_collection()
     poss = col.find({
         'n_reads': {
             '$lt': 3
@@ -220,3 +220,15 @@ def get_next_doc():
         return None
     toggle_being_read(col, to_be_read['url'], True)
     return to_be_read
+def keyword_exists(kw):
+    col = get_tweets_collection()
+    poss = col.find({
+        'keyword': {
+            '$exists': kw
+        }
+    })
+    print(poss)
+    return poss
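One caveat on the new keyword_exists: MongoDB's $exists operator expects a boolean, so {'keyword': {'$exists': kw}} only asks whether a keyword field is present at all, regardless of kw's value. If the intent is to check whether a particular keyword is already stored, an equality query is the usual form. A sketch under that assumption, with get_tweets_collection assumed to return a pymongo collection as above:

    # Sketch only: equality match on the stored keyword instead of $exists.
    def keyword_exists(kw):
        col = get_tweets_collection()
        # $exists takes a boolean flag, so it cannot match a specific value;
        # look up a document whose 'keyword' field equals kw instead
        return col.find_one({'keyword': kw}) is not None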
@@ -19,7 +19,7 @@ TT_DIR = 'training_terms'
 def get_file(name):
     with open(name, 'r') as f:
         data = f.readlines()
-        return data
+        return map(lambda x: x.strip(), data)
 def update_progress(n_done, n_tasks):
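Side note on the get_file change: under Python 2 the map call returns a list, but under Python 3 it returns a single-use iterator that is silently exhausted after one pass over the terms. A list comprehension keeps the stripped lines reusable under either interpreter; a minimal sketch:

    def get_file(name):
        # return a real list of stripped lines so callers can iterate more than once
        with open(name, 'r') as f:
            return [line.strip() for line in f]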
@@ -48,17 +48,16 @@ if __name__ == '__main__':
     n_done = 0
     n_docs = 0
     for term in terms:
-        update_progress(n_done, n_tasks)
         try:
             social = querier.perform(term, training=True)
         except UsageError:
             # no articles
             continue
-        n_done += 1
         n_docs += mturk.new_doc(social)
+        n_done += 1
         update_progress(n_done, n_tasks)
     print('\t... done')
     summary = 'summary: %s doc(s) ready for training from %s terms' % (n_docs, n_tasks)
...