auto_c_hypo.py (forked from Delta-Ark/SpecificRelated_Word_Prompter)
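"""Interactive WordNet word prompter.

For each word typed at the "analyze:" prompt, print comma-separated lemma
names drawn from related synsets (hyponyms plus part/substance holonyms and
meronyms for nouns and verbs; the synsets' own lemmas for adjectives and
adverbs). The word typed at the "select:" prompt is added to a running word
bank; a single space at "analyze:" ends the loop after one final "select:"
and prints the banked words.
"""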
# imports
from nltk.corpus import wordnet as wn

# bank of words the user has selected so far
words = []
# helpers
def word_list_without_duplicates(word_list):
    # remove duplicates by putting the words in a set
    word_list = set(word_list)
    # join word_list back together with commas
    return ', '.join(word_list)
def related_synsets(synset):
    # gather synsets related to this one: hyponyms, holonyms, and meronyms
    return (
        synset.hyponyms() +
        synset.part_holonyms() +
        synset.substance_holonyms() +
        synset.part_meronyms() +
        synset.substance_meronyms()
    )
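
# Illustrative sketch (not part of the original script): how the two helpers
# combine for a single noun synset. The DEMO flag and demo_* names exist only
# for this sketch; it assumes the WordNet corpus has been downloaded, and the
# exact lemmas depend on the installed WordNet data.
DEMO = False
if DEMO:
    demo_synset = wn.synsets('dog', 'n')[0]
    demo_names = []
    for related in related_synsets(demo_synset):
        demo_names += related.lemma_names()
    # prints something like: puppy, corgi, pooch, ...
    # (order varies, since duplicates are removed via an unordered set)
    print word_list_without_duplicates(demo_names)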
# loop
while True:
    # input system
    input_text = raw_input("analyze: ")
    # analyzer: for each word typed, print related lemma names per part of speech
    split_input = input_text.split()
    print " "
    for word in split_input:
        print "--"+word.upper()+"--NOUN--",
        synsets = wn.synsets(word, 'n')
        lemma_names = []
        for synset in synsets:
            for entry in related_synsets(synset):
                lemma_names += entry.lemma_names()
        print word_list_without_duplicates(lemma_names),
        print "--"+word.upper()+"--VERB--",
        synsets = wn.synsets(word, 'v')
        lemma_names = []
        for synset in synsets:
            for entry in related_synsets(synset):
                lemma_names += entry.lemma_names()
        print word_list_without_duplicates(lemma_names),
        print "--"+word.upper()+"--ADJ--",
        token_a = wn.synsets(word, 'a')
        lemma_names = []
        for entry in token_a:
            lemma_names += entry.lemma_names()
        print word_list_without_duplicates(lemma_names),
        print "--"+word.upper()+"--ADV--",
        token_r = wn.synsets(word, 'r')
        lemma_names = []
        for entry in token_r:
            lemma_names += entry.lemma_names()
        print word_list_without_duplicates(lemma_names),
        print " "
    print " "
    # adder: bank the word the user picks from the printed suggestions
    final_input = raw_input("select: ")
    words.append(final_input)
    # shower: display everything banked so far
    print words
    # exiter: a single space at the "analyze:" prompt ends the session
    if input_text == " ":
        print " "
        print " ".join(words)
        print " "
        break
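
# Sketch of a session (illustrative; the suggestion lists depend on the
# installed WordNet data):
#
#   analyze: river
#   --RIVER--NOUN-- <comma-separated related lemmas> --RIVER--VERB-- ...
#   select: estuary
#   ['estuary']
#   analyze:
#
# Typing a single space at "analyze:" ends the loop and prints the banked
# words joined by spaces.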