Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

GAN integration #12

Open
wants to merge 23 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -99,3 +99,9 @@ ENV/

# mypy
.mypy_cache/

# my_files
/models/*
/models_old/*
/saved_old_runs/*
/saved_runs/*
6 changes: 6 additions & 0 deletions .idea/libraries/R_User_Library.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/vcs.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

304 changes: 304 additions & 0 deletions .idea/workspace.xml

Large diffs are not rendered by default.

32,624 changes: 32,624 additions & 0 deletions Writing-editing network/acl_titles_and_abstracts.txt

Large diffs are not rendered by default.

15 changes: 12 additions & 3 deletions Writing-editing network/configurations.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ class CommonConfig(object):
num_exams = 3
log_interval = 1000
predict_right_after = 3
patience = 5
advantage_clipping = 5

class SmallDatasetWithTopics(CommonConfig):
relative_data_path = '/data/small-json-topics/train.dat'
Expand Down Expand Up @@ -153,6 +153,14 @@ class LargeConfig6(LargeDataset):
use_topics = True
experiment_name = "lg-with-topics-lr-0.0001-WE-300"

class RandomConfig(SmallDataset):
    # Randomly-initialised baseline: no pretrained weights and no topic input.
    emsize = 512                 # word-embedding dimensionality
    context_dim = 128            # context vector dimensionality
    lr = 0.0001                  # learning rate
    pretrained = None            # no pretrained embeddings — weights start random
    use_topics = False           # topic conditioning disabled for this baseline
    experiment_name = "random"   # identifier for this configuration (key in `configuration` below)

configuration = {
"st1": SmallTopicsConfig1(),
"st2": SmallTopicsConfig2(),
Expand All @@ -168,7 +176,8 @@ class LargeConfig6(LargeDataset):
"l3": LargeConfig3(),
"l4": LargeConfig4(),
"l5": LargeConfig5(),
"l6": LargeConfig6()}
"l6": LargeConfig6(),
"random": RandomConfig()}

def get_conf(name):
    """Return the configuration instance registered under *name*.

    Args:
        name: short key such as "st1", "l6" or "random".

    Raises:
        KeyError: if *name* is not a registered configuration.
    """
    # A duplicated `return configuration[name]` line (diff artifact) was removed;
    # only one return is ever reachable.
    return configuration[name]
87 changes: 87 additions & 0 deletions Writing-editing network/eval.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import pickle
import os
import collections
import sys

sys.path.append('pycocoevalcap')
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.meteor.meteor import Meteor
#from pycocoevalcap.cider.cider import Cider

class Evaluate(object):
    """Score candidate sentences against reference sentences.

    Wraps the pycocoevalcap scorers (BLEU-1..4, METEOR, ROUGE_L; CIDEr is
    available but commented out) behind a single `evaluate` entry point.
    """

    def __init__(self):
        # (scorer, metric-name(s)) pairs; Bleu(4) produces four scores at once,
        # so its second element is a list of names instead of a single string.
        self.scorers = [
            (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
            (Meteor(), "METEOR"),
            (Rouge(), "ROUGE_L")
        ]#, (Cider(), "CIDEr")

    def convert(self, data):
        """Recursively encode every str inside *data* to UTF-8 bytes.

        Dicts are rebuilt with converted keys/values; other iterables are
        rebuilt with their own type; everything else is returned unchanged.
        """
        # Local import: collections.Mapping/Iterable were removed in
        # Python 3.10 — the abc module is the supported location.
        from collections.abc import Iterable, Mapping
        if isinstance(data, str):          # was `basestring` (Python 2 only) -> NameError on Python 3
            return data.encode('utf-8')
        elif isinstance(data, Mapping):    # was collections.Mapping (removed in 3.10)
            # was a bare `convert(...)` -> NameError; must go through self
            return dict(map(self.convert, data.items()))
        elif isinstance(data, Iterable):
            return type(data)(map(self.convert, data))
        else:
            return data

    def score(self, ref, hypo):
        """Run every configured scorer and return {metric_name: score}.

        Args:
            ref:  dict mapping id -> list of reference sentences.
            hypo: dict mapping id -> single-element list with the candidate.
        """
        final_scores = {}
        for scorer, method in self.scorers:
            score, scores = scorer.compute_score(ref, hypo)
            if isinstance(score, list):    # Bleu returns one score per n-gram order
                for m, s in zip(method, score):
                    final_scores[m] = s
            else:
                final_scores[method] = score

        return final_scores

    def evaluate(self, get_scores=True, live=False, **kwargs):
        """Compute all metrics and print them.

        With live=True, `ref` and `cand` kwargs are in-memory dicts;
        otherwise they are paths to pickled dicts loaded from disk.
        Returns the score dict when get_scores is true.
        """
        if live:
            temp_ref = kwargs.pop('ref', {})
            cand = kwargs.pop('cand', {})
        else:
            reference_path = kwargs.pop('ref', '')
            candidate_path = kwargs.pop('cand', '')

            # load caption data
            with open(reference_path, 'rb') as f:
                temp_ref = pickle.load(f)
            with open(candidate_path, 'rb') as f:
                cand = pickle.load(f)

        # Re-key both dicts with a shared integer index so the scorers can
        # line candidates up with their references.
        hypo = {}
        ref = {}
        i = 0
        for vid, caption in cand.items():
            hypo[i] = [caption]
            ref[i] = temp_ref[vid]
            i += 1

        # compute scores
        final_scores = self.score(ref, hypo)
        #"""
        # print out scores
        print ('Bleu_1:\t', final_scores['Bleu_1'])
        print ('Bleu_2:\t', final_scores['Bleu_2'])
        print ('Bleu_3:\t', final_scores['Bleu_3'])
        print ('Bleu_4:\t', final_scores['Bleu_4'])
        print ('METEOR:\t', final_scores['METEOR'])
        print ('ROUGE_L:', final_scores['ROUGE_L'])
        #print ('CIDEr:\t', final_scores['CIDEr'])
        #"""

        if get_scores:
            return final_scores


if __name__ == '__main__':
    # Tiny smoke test: score two candidate sentences against their references.
    candidates = {'generated_description1': 'how are you',
                  'generated_description2': 'Hello how are you'}
    references = {'generated_description1': ['what are you', 'where are you'],
                  'generated_description2': ['Hello how are you', 'Hello how is your day']}
    evaluator = Evaluate()
    evaluator.evaluate(live=True, cand=candidates, ref=references)
Loading