# whitepaperAnalysis.py
# TUTORIAL: https://www.kernix.com/blog/similarity-measure-of-textual-documents_p12
# GITHUB LIST: https://github.com/masonicGIT/ico-whitepapers
# https://stackoverflow.com/questions/44786888/will-word2vec-be-more-efficient-in-text-based-plagiarism-detection-than-wordnet
from nltk import word_tokenize
from nltk import download
from nltk.corpus import stopwords
from nltk.corpus.reader.plaintext import PlaintextCorpusReader
import numpy as np
from gensim import corpora
from gensim.models import TfidfModel
from gensim.models import LsiModel
from gensim.similarities import MatrixSimilarity

# Download the NLTK resources needed for tokenization and stop-word removal.
download('punkt')
download('stopwords')
# Read every file in the corpus directory as one raw-text document.
corpusdir = './txts'  # Directory containing the whitepaper .txt files.
all_files = PlaintextCorpusReader(corpusdir, '.*')
fileids = all_files.fileids()
print(fileids)
print(len(fileids))

texts = []      # raw text of each document
fileindex = []  # fileid of each document, in the same order as texts
for fileid in fileids:
    texts.append(all_files.raw(fileids=fileid))
    fileindex.append(fileid)
stop_words = stopwords.words('english')

def preprocess(text):
    """Lowercase, tokenize, and keep only alphabetic, non-stop-word tokens."""
    text = text.lower()
    doc = word_tokenize(text)
    doc = [word for word in doc if word not in stop_words]
    doc = [word for word in doc if word.isalpha()]
    return doc

texts_og = texts  # passed as the "labels" argument to filter_docs below
corpus = [preprocess(text) for text in texts]
number_of_docs = len(corpus)
print(number_of_docs)
def filter_docs(corpus, texts, labels, condition_on_doc):
    """
    Filter corpus, texts, and labels with the function condition_on_doc,
    which takes a tokenized doc. A document is kept if condition_on_doc(doc)
    is true.
    """
    number_of_docs = len(corpus)
    if texts is not None:
        texts = [text for (text, doc) in zip(texts, corpus)
                 if condition_on_doc(doc)]
    labels = [i for (i, doc) in zip(labels, corpus) if condition_on_doc(doc)]
    corpus = [doc for doc in corpus if condition_on_doc(doc)]
    print("{} docs removed".format(number_of_docs - len(corpus)))
    return (corpus, texts, labels)

# Drop any documents that are empty after preprocessing.
corpus, texts, texts_og = filter_docs(corpus, texts, texts_og, lambda doc: (len(doc) != 0))
sims = {'texts': {}}

# Dictionary maps every unique token in the corpus to an integer id.
dictionary = corpora.Dictionary(corpus)
print(dictionary)  # shows the number of unique tokens (words) in the corpus
corpus_gensim = [dictionary.doc2bow(doc) for doc in corpus]  # bag-of-words: (word id, count) pairs per document
tfidf = TfidfModel(corpus_gensim)  # weight the bag-of-words counts with TF-IDF
corpus_tfidf = tfidf[corpus_gensim]
lsi = LsiModel(corpus_tfidf, id2word=dictionary, num_topics=200)
lsi_index = MatrixSimilarity(lsi[corpus_tfidf])
# Pairwise cosine similarities between all documents in LSI space.
sims['texts']['LSI'] = np.array([lsi_index[lsi[corpus_tfidf[i]]]
                                 for i in range(len(corpus))])
def most_similar(i, X_sims, topn=None):
    """Return the indices of the topn documents most similar to document i,
    given the similarity matrix X_sims. Document i itself is included (its
    self-similarity is 1.0, so it sorts first)."""
    r = np.argsort(X_sims[i])[::-1]
    if topn is None:
        return r
    return r[:topn]
# Print the whitepapers most similar to a chosen one (index 36 here).
results = most_similar(36, sims['texts']['LSI'], 5)
print("\nMost similar papers to", fileindex[36], "\n")
for idx, val in enumerate(results):
    print(fileindex[val])
print(most_similar(36, sims['texts']['LSI'], 5))  # the same result as raw indices
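# --- Illustrative addition, not part of the original script ---
# A minimal sketch of querying by filename instead of by index; it only reuses
# the fileindex list, the most_similar() helper, and the sims matrix built
# above. The helper name most_similar_by_name is hypothetical.
def most_similar_by_name(name, X_sims, topn=None):
    """Return the fileids of the topn documents most similar to the named file."""
    i = fileindex.index(name)  # raises ValueError if the file is not in the corpus
    return [fileindex[j] for j in most_similar(i, X_sims, topn)]

# Example usage (uncomment to run):
# print(most_similar_by_name(fileindex[36], sims['texts']['LSI'], 5))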