This works almost perfectly, except for the first letter of the lines.
@@ -13,35 +13,44 @@ indices_char = dict((i, c) for i, c in enumerate(characters))
 
 INPUT_VOCAB_SIZE = len(characters)
 BATCH_SIZE = 200
-HIDDEN_SIZE = 128
+HIDDEN_SIZE = 100
 N_LAYERS = 1
 TIME_STEPS = 3
 
 def encode_one_hot(line):
-    remain = len(line) % TIME_STEPS
-    if remain != 0:
-        line = line + ' ' * (TIME_STEPS-remain)
+    # remain = len(line) % TIME_STEPS
+    # if remain != 0:
+    #     line = line + ' ' * (TIME_STEPS-remain)
     x = np.zeros((len(line), INPUT_VOCAB_SIZE))
     for i, c in enumerate(line):
         index = char_indices[c] if c in characters else char_indices[' ']
         x[i][index] = 1
-    return np.reshape(x, ( (int)(len(line) / TIME_STEPS), TIME_STEPS, INPUT_VOCAB_SIZE ) )
+    return x
 
-def decode_one_hot(y):
-    x = np.reshape(y, (y.shape[0]*y.shape[1], INPUT_VOCAB_SIZE))
+def decode_one_hot(x):
     s = []
     for onehot in x:
         one_index = np.argmax(onehot)
         s.append(indices_char[one_index])
     return ''.join(s)
 
+def prepare_for_rnn(x):
+    # All slices of size TIME_STEPS, sliding through x
+    ind = [np.array(np.arange(i, i+TIME_STEPS)) for i in range(x.shape[0] - TIME_STEPS + 1)]
+    ind = np.array(ind, dtype=np.int32)
+    x_rnn = x[ind]
+    # np.append(x_rnn, np.zeros((TIME_STEPS, INPUT_VOCAB_SIZE)), axis=0)
+    # np.append(x_rnn, np.zeros((TIME_STEPS, INPUT_VOCAB_SIZE)), axis=0)
+    return x_rnn
+
 def input_generator(nsamples):
     def generate_line():
-        inline = []; outline = []
+        inline = [' ']; outline = []
         for _ in range(nsamples):
             c = random.choice(characters)
             expected = c.lower() if c in string.ascii_letters else ' '
             inline.append(c); outline.append(expected)
+        inline.append(' ')
         for i in range(nsamples):
             if outline[i] == ' ': continue
             if i > 0 and i < nsamples-1:
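For orientation (an editorial sketch, not part of the commit): the index-array trick in prepare_for_rnn gathers every run of TIME_STEPS consecutive rows of the one-hot matrix into one 3-D batch of sliding windows. A minimal, self-contained illustration with toy shapes:

import numpy as np

TIME_STEPS = 3
x = np.arange(20).reshape(5, 4)   # stand-in for an encoded line: 5 characters, vocabulary of 4

# Same indexing as prepare_for_rnn: one row of indices per sliding window.
ind = np.array([np.arange(i, i + TIME_STEPS)
                for i in range(x.shape[0] - TIME_STEPS + 1)], dtype=np.int32)
x_rnn = x[ind]
print(x_rnn.shape)                # (3, 3, 4): 5 - 3 + 1 windows, each TIME_STEPS x vocab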
@@ -57,7 +66,7 @@ def input_generator(nsamples):
         #print("Output:", expected)
         data_in = encode_one_hot(input_data)
         data_out = encode_one_hot(expected)
-        yield data_in, data_out
+        yield prepare_for_rnn(data_in), data_out
 
 def train(model):
     model.compile(loss='categorical_crossentropy',
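Assuming input_data is the joined inline list from generate_line above (nsamples random characters plus the leading and trailing space), the yielded arrays now pair one sliding window with one normalized character. A quick shape check with toy numbers:

import numpy as np

nsamples, vocab, TIME_STEPS = 5, 4, 3
encoded_in = np.zeros((nsamples + 2, vocab))    # ' ' + chars + ' ', one-hot encoded
windows = np.zeros((encoded_in.shape[0] - TIME_STEPS + 1, TIME_STEPS, vocab))
targets = np.zeros((nsamples, vocab))           # one normalized character per window
assert windows.shape[0] == targets.shape[0]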
@@ -66,7 +75,7 @@ def train(model):
     input_gen = input_generator(BATCH_SIZE)
     validation_gen = input_generator(BATCH_SIZE)
     model.fit_generator(input_gen,
-                epochs = 200, workers=1,
+                epochs = 50, workers=1,
                 steps_per_epoch = 50,
                 validation_data = validation_gen,
                 validation_steps = 10)
@@ -75,10 +84,11 @@ def build_model():
     model = Sequential()
     r_layer = LSTM(HIDDEN_SIZE, input_shape=(None, INPUT_VOCAB_SIZE))
     model.add(r_layer)
-    model.add(RepeatVector(TIME_STEPS))
-    for _ in range(N_LAYERS):
-        model.add(LSTM(HIDDEN_SIZE, return_sequences=True))
-    model.add(TimeDistributed(Dense(INPUT_VOCAB_SIZE, activation='softmax')))
+    # model.add(RepeatVector(TIME_STEPS))
+    # for _ in range(N_LAYERS):
+    #     model.add(LSTM(HIDDEN_SIZE, return_sequences=True))
+    # model.add(TimeDistributed(Dense(INPUT_VOCAB_SIZE, activation='softmax')))
+    model.add(Dense(INPUT_VOCAB_SIZE, activation='softmax'))
     return model
 
 model = build_model()
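For reference, a standalone sketch of the network as it reads after this change (Keras 2.x imports; the placeholder sizes, optimizer and metrics here are assumptions, since the compile call's remaining arguments fall outside this diff):

from keras.models import Sequential
from keras.layers import LSTM, Dense

INPUT_VOCAB_SIZE = 100   # placeholder; the real value is len(characters)
HIDDEN_SIZE = 100

model = Sequential()
# One LSTM reads the whole TIME_STEPS window and returns only its final state.
model.add(LSTM(HIDDEN_SIZE, input_shape=(None, INPUT_VOCAB_SIZE)))
# A single softmax over the vocabulary picks the normalized character for that window.
model.add(Dense(INPUT_VOCAB_SIZE, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()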
@@ -89,7 +99,7 @@ input("Network has been trained. Press <Enter> to run program.")
 with open(sys.argv[1]) as f:
     for line in f:
         if line.isspace(): continue
-        batch = encode_one_hot(line)
+        batch = prepare_for_rnn(encode_one_hot(line))
         preds = model.predict(batch)
         normal = decode_one_hot(preds)
         print(normal)