I am reimplementing a Text2speech project and am running into an error in the decoder part ("Function call stack: keras_scratch_graph"). The network architecture is based on the Deep Voice 3 paper.
I am using Keras with TF 2.0 on Google Colab. Below is the code for the Decoder Keras model.
y1 = tf.ones(shape = (16, 203, 320))

def Decoder(name = "decoder"):
    # Decoder Prenet
    din = tf.concat((tf.zeros_like(y1[:, :1, -hp.mel:]), y1[:, :-1, -hp.mel:]), 1)
    keys = K.Input(shape = (180, 256), batch_size = 16, name = "keys")
    vals = K.Input(shape = (180, 256), batch_size = 16, name = "vals")
    prev_max_attentions_li = tf.ones(shape=(hp.dlayer, hp.batch_size), dtype=tf.int32)
    #prev_max_attentions_li = K.Input(tensor = prev_max_attentions_li)

    for i in range(hp.dlayer):
        dpout = K.layers.Dropout(rate = 0 if i == 0 else hp.dropout)(din)
        fc_out = K.layers.Dense(hp.char_embed, activation = 'relu')(dpout)
        print("=======================================================================================================")
        print("The FC value is ", fc_out)
        print("=======================================================================================================")

    query_pe = K.layers.Embedding(hp.Ty, hp.char_embed)(tf.tile(tf.expand_dims(tf.range(hp.Ty // hp.r), 0), [hp.batch_size, 1]))
    key_pe = K.layers.Embedding(hp.Tx, hp.char_embed)(tf.tile(tf.expand_dims(tf.range(hp.Tx), 0), [hp.batch_size, 1]))

    alignments_li, max_attentions_li = [], []
    for i in range(hp.dlayer):
        dpout = K.layers.Dropout(rate = 0)(fc_out)
        queries = K.layers.Conv1D(hp.datten_size, hp.dfilter, padding = 'causal', dilation_rate = 2**i)(dpout)
        fc_out = (queries + fc_out) * tf.math.sqrt(0.5)
        print("=======================================================================================================")
        print("The FC value is ", fc_out)
        print("=======================================================================================================")
        queries = fc_out + query_pe
        keys += key_pe
        tensor, alignments, max_attentions = Attention(name = "attention")(queries, keys, vals, prev_max_attentions_li[i])
        fc_out = (tensor + queries) * tf.math.sqrt(0.5)
        alignments_li.append(alignments)
        max_attentions_li.append(max_attentions)

    decoder_output = fc_out

    dpout = K.layers.Dropout(rate = 0)(decoder_output)
    mel_logits = K.layers.Dense(hp.mel * hp.r)(dpout)

    dpout = K.layers.Dropout(rate = 0)(fc_out)
    done_output = K.layers.Dense(2)(dpout)

    return K.Model(inputs = [keys, vals], outputs = [mel_logits, done_output, decoder_output, alignments_li, max_attentions_li], name = name)

decode = Decoder()
kin = tf.ones(shape = (16, 180, 256))
vin = tf.ones(shape = (16, 180, 256))
print(decode(kin, vin))
tf.keras.utils.plot_model(decode, to_file = "decoder.png", show_shapes = True)
When I test this with some data, I get the error message below. The problem seems to come from "fc_out"; does anyone know how to pass the "fc_out" output from the first for loop into the second for loop? Any answer would be appreciated.
File "Decoder.py", line 60, in <module>
decode = Decoder()
File "Decoder.py", line 33, in Decoder
dpout = K.layers.Dropout(rate = 0)(fc_out)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 596, in __call__
base_layer_utils.create_keras_history(inputs)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 199, in create_keras_history
_, created_layers = _create_keras_history_helper(tensors, set(), [])
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 245, in _create_keras_history_helper
layer_inputs, processed_ops, created_layers)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 245, in _create_keras_history_helper
layer_inputs, processed_ops, created_layers)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 245, in _create_keras_history_helper
layer_inputs, processed_ops, created_layers)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 243, in _create_keras_history_helper
constants[i] = backend.function([], op_input)([])
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/keras/backend.py", line 3510, in __call__
outputs = self._graph_fn(*converted_inputs)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 572, in __call__
return self._call_flat(args)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 671, in _call_flat
outputs = self._inference_function.call(ctx, args)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 445, in call
ctx=ctx)
File "/Users/ydc/dl-npm/lib/python3.7/site-packages/tensorflow/python/eager/execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.FailedPreconditionError: Error while reading resource variable _AnonymousVar19 from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/_AnonymousVar19/N10tensorflow3VarE does not exist.
[[node dense_7/BiasAdd/ReadVariableOp (defined at Decoder.py:33) ]] [Op:__inference_keras_scratch_graph_566]
Function call stack:
keras_scratch_graph
If you are using tensorflow-gpu, add the following:
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print("physical_devices-------------", len(physical_devices))
tf.config.experimental.set_memory_growth(physical_devices[0], True)
In addition, you can reduce the batch_size, or run the code on a different machine or cloud service such as Google Colab or Amazon's cloud. I think this error is caused by a memory limitation.
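If you would rather give TensorFlow a fixed memory budget than let it grow, the same experimental config API can cap the allocation up front. This is only a rough sketch for TF 2.0 assuming a single GPU; the 4096 MB limit is just an example value, not something from the original setup:

import tensorflow as tf

gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    # Give the first GPU a fixed memory budget (example value of 4096 MB).
    # Like set_memory_growth, this must run before any models or tensors are created.
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit = 4096)])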