main.py
# Libraries
import csv
import PyPDF2
from nltk import FreqDist
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
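# word_tokenize and stopwords rely on NLTK data that isn't bundled with the
# library; if they raise a LookupError, uncomment these one-time downloads:
# import nltk
# nltk.download('punkt')        # tokenizer models ('punkt_tab' on newer NLTK)
# nltk.download('stopwords')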
# Name of the PDF file to read. To process many files instead, wrap the steps
# below in a for-loop (a sketch follows).
filename = 'test.pdf'
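# A minimal sketch of that multi-file variant, assuming the PDFs sit in the
# working directory:
# import glob
# for filename in glob.glob('*.pdf'):
#     ...  # run the read/tokenize/count steps below once per file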
# Number of most frequent words to write to the output file.
count_word = 30
# Open the PDF in binary mode so PyPDF2 can read it.
pdfFileObj = open(filename, 'rb')
# The pdfReader object parses the PDF (PdfReader is the PyPDF2 3.x name;
# older releases called it PdfFileReader).
pdfReader = PyPDF2.PdfReader(pdfFileObj)
# Knowing the number of pages lets us walk through all of them.
num_pages = len(pdfReader.pages)
count = 0
text = ""
# The while loop reads each page and appends its text.
while count < num_pages:
    pageObj = pdfReader.pages[count]
    count += 1
    text += pageObj.extract_text()
pdfFileObj.close()
# The word_tokenize() function will break our text phrases into individual words.
tokens = word_tokenize(text)
# Remove single-character tokens (mostly punctuation)
tokens = [word for word in tokens if len(word) > 1]
# Remove numbers
tokens = [word for word in tokens if not word.isnumeric()]
# Punctuation tokens to filter out (the single-character filter above already
# removes these, but we keep the list explicit for clarity).
punctuations = ['(', ')', ';', ':', '[', ']', ',', '.']
# We initialize the stopwords variable, which is a list of words like "The," "I," "and," etc. that don't hold much
# value as keywords.
stop_words = stopwords.words('english')
# We create a list comprehension that only returns a list of words that are NOT IN stop_words and NOT IN punctuations.
keywords = [word for word in tokens if word.lower() not in stop_words and word not in punctuations]
# Tally how often each keyword occurs (case-insensitive).
f_dist = FreqDist()
for word in keywords:
    f_dist[word.lower()] += 1
# Create and write a CSV file with the top words and their counts.
with open('out.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(["word", "weight"])
    writer.writerows(f_dist.most_common(count_word))
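# out.csv now holds a header row followed by up to count_word rows, one
# word/count pair per line, ordered from most to least frequent.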
# Print the same top words to stdout.
print(f_dist.most_common(count_word))