t_lda_1day_lemma.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 4 16:04:38 2018
@author: LEIHAO
"""
import lda
import sqlite3
import numpy as np
#from scipy.sparse import csr_matrix, save_npz
from nltk.corpus import wordnet
from nltk import word_tokenize, pos_tag
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer
import time


def get_wordnet_pos(treebank_tag):
    """Map a Penn Treebank POS tag to the WordNet constant the lemmatizer expects."""
    if treebank_tag.startswith('J'):
        return wordnet.ADJ
    elif treebank_tag.startswith('V'):
        return wordnet.VERB
    elif treebank_tag.startswith('N'):
        return wordnet.NOUN
    elif treebank_tag.startswith('R'):
        return wordnet.ADV
    else:
        # Fall back to noun, which is also WordNetLemmatizer's default.
        return wordnet.NOUN

def is_float(string):
    """Return True if the string parses as a float (used to drop numeric tokens)."""
    try:
        float(string)
        return True
    except ValueError:
        return False

class LemmaTokenizer(object):
    """Callable tokenizer for scikit-learn vectorizers: POS-tags each token,
    lemmatizes it with the matching WordNet POS, and drops tokens that are
    shorter than three characters or purely numeric."""

    def __init__(self):
        self.wnl = WordNetLemmatizer()

    def __call__(self, doc):
        word_pos = pos_tag(word_tokenize(doc))
        return [self.wnl.lemmatize(w, get_wordnet_pos(p)) for w, p in word_pos
                if len(w) >= 3 and not w.isdigit() and not is_float(w)]
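
# Usage sketch (not exercised in this run): the tokenizer above can be plugged
# into a vectorizer through its `tokenizer` argument, e.g.
#   lemma_vectorizer = CountVectorizer(tokenizer=LemmaTokenizer(),
#                                      stop_words='english', min_df=5)
# The run below skips lemmatization, hence 'nolemma' in the output filename.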

# -----------------------------------
# Extracting features from database
# -----------------------------------
def article_extractor(sqlite_file, start_date, end_date):
    """Fetch article texts from the SQLite database for dates in [start_date, end_date]."""
    conn = sqlite3.connect(sqlite_file)
    c = conn.cursor()
    result = c.execute("SELECT article FROM articles WHERE date BETWEEN ? AND ?",
                       (start_date, end_date))
    articles_tuple = result.fetchall()
    conn.close()
    # Each fetched row is a 1-tuple; unwrap to a flat list of article strings.
    articles = [item[0] for item in articles_tuple]
    return articles

directory = '/Users/leihao/Downloads/'
sqlite_file = directory + 'nasdaq.db'
start_date, end_date = '2016-01-01', '2016-01-02'

#t0 = time.time()
articles = article_extractor(sqlite_file, start_date, end_date)
#t1 = time.time()
#print("Extraction takes {0:.2f} seconds".format(t1 - t0))
#conn=sqlite3.connect(sqlite_file)
#c=conn.cursor()
#articles_2016=c.execute("SELECT article FROM articles WHERE date BETWEEN ? AND ?", (start_date, end_date))
#articles_tuple=articles_2016.fetchall()
#conn.close()
#articles=[item[0] for item in articles_tuple]
#t3=time.time()
#import cProfile, pstats, io
#pr=cProfile.Profile()
#pr.enable()
# Bag-of-words vectorizer: English stop words removed, terms appearing in
# fewer than 5 documents dropped.
c_vectorizer = CountVectorizer(stop_words='english', min_df=5)
#h_vectorizer = HashingVectorizer(tokenizer=LemmaTokenizer(), stop_words='english', ngram_range=(1, 2))
X_c = c_vectorizer.fit_transform(articles)
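# X_c is a sparse (n_documents, n_terms) matrix of raw term counts; lda.LDA
# fits on integer counts, which is why CountVectorizer (not TF-IDF) is used.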
#pr.disable()
#s=io.StringIO()
#sortby='time'
#ps=pstats.Stats(pr,stream=s).sort_stats(sortby)
#ps.print_stats()
#print(s.getvalue())
#X_h=h_vectorizer.transform(articles)
#t4=time.time()
#print("Vectorizer takes {} seconds".format(t4-t3))

# -----------------------------------
# LDA modelling
# -----------------------------------
#for num_of_topics in (20, 30):
num_of_topics = 20
model = lda.LDA(n_topics=num_of_topics, n_iter=1500, random_state=1)
model.fit(X_c)
topic_word = model.topic_word_   # (n_topics, n_terms) word distribution per topic
n_top_words = 20

# Write the top 20 words of each topic to a text file.
vocab = np.array(c_vectorizer.get_feature_names())
with open(directory + 't_nolemma_topic' + str(num_of_topics) + '_160101_160102.txt',
          'w+', encoding='utf-8') as f:
    for i, topic_dist in enumerate(topic_word):
        # Reversed slice after argsort picks the n_top_words highest-probability words.
        topic_words = vocab[np.argsort(topic_dist)][:-(n_top_words + 1):-1]
        f.write('Topic {0}: {1}\n'.format(i, ', '.join(topic_words)))
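
# Sketch (assumption, not part of the original script): lda.LDA also exposes
# doc_topic_ after fitting, an (n_documents, n_topics) array of per-document
# topic mixtures. It could be saved next to the topic file, e.g.
#   np.savetxt(directory + 't_nolemma_doc_topic_160101_160102.txt',  # hypothetical name
#              model.doc_topic_, fmt='%.6f')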