I ran into a strange problem I have never had before. I built a simple model:
merged = Concatenate()(model_inputs)
merged = Dense(50,activation='relu')(merged)
merged = Dense(30,activation='relu')(merged)
merged = Dense(1,activation='sigmoid')(merged)
model = Model(inputs=model_inputs,outputs=merged)
where model_inputs is:
[<KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'CryoSleep')>,
<KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'RoomService')>,
<KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'Spa')>,
<KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'VRDeck')>,
<KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'Deck')>,
<KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'Side')>,
<KerasTensor: shape=(None, 1) dtype=float32 (created by layer 'AllSpending')>]
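(For reference, each of these tensors comes from a named Input layer of shape (None, 1), one per feature; roughly like the sketch below, although the exact construction code is not shown here:)

from tensorflow.keras.layers import Input

# One (None, 1) input per feature -- a sketch of how the tensors listed above are typically built
feature_names = ['CryoSleep', 'RoomService', 'Spa', 'VRDeck', 'Deck', 'Side', 'AllSpending']
model_inputs = [Input(shape=(1,), name=name) for name in feature_names]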
Now I compile the model and want to fit it:
X_Train_nn = create_input_values(X_Train)
y_Train_nn = create_label_values(y_Train)
X_Val_nn = create_input_values(X_Val)
y_Val_nn = create_label_values(y_Val)
model.compile(loss='binary_crossentropy',optimizer='nadam',metrics=['acc'])
model.fit(X_Train_nn,y_Train_nn,epochs=50,batch_size=32,validation_data=(X_Val_nn , y_Val_nn ),verbose=1)
The model trains for one epoch but fails when it tries to run validation. If I remove the validation part, it works fine. Otherwise, I get the following error message:
ValueError: Layer "model_1" expects 7 input(s), but it received 1 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, 7) dtype=float64>]
But that is not the case: the validation set has the correct length. Why? I can train the model on X_Val_nn and it trains correctly. I can pass X_Val_nn to the evaluate function and it works fine. I even passed the training set as both the training and the validation data, and I get the same error. So it can train on X_Train_nn, but validation fails, and it always shows the error message above.
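To summarize the behavior in code (same objects as above):

# Works: training without validation data
model.fit(X_Train_nn, y_Train_nn, epochs=50, batch_size=32, verbose=1)

# Works: evaluating directly on the validation data
model.evaluate(X_Val_nn, y_Val_nn, batch_size=32, verbose=1)

# Fails at the end of the first epoch with the error above, even when the
# training set itself is passed as the validation data:
model.fit(X_Train_nn, y_Train_nn, epochs=50, batch_size=32,
          validation_data=(X_Train_nn, y_Train_nn), verbose=1)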
Do you know what is going on here?
PS: The full traceback is below:
Epoch 1/50
Exception ignored in: <function _xla_gc_callback at 0x7f601ee392d0>
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/jax/_src/lib/__init__.py", line 103, in _xla_gc_callback
def _xla_gc_callback(*args):
KeyboardInterrupt:
26/28 [==========================>...] - ETA: 0s - loss: 0.4339 - acc: 0.7849
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-195-c3a76520886a> in <cell line: 6>()
4 y_Val_nn = create_label_values(y_Val)
5 model.compile(loss='binary_crossentropy',optimizer='nadam',metrics=['acc'])
----> 6 model.fit(X_Val_nn,y_Val_nn,epochs=50,batch_size=32,validation_data=(range(100), y_Train_nn),verbose=1)
33 frames
/usr/local/lib/python3.10/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
59 def error_handler(*args, **kwargs):
60 if not tf.debugging.is_traceback_filtering_enabled():
---> 61 return fn(*args, **kwargs)
62
63 filtered_tb = None
/usr/local/lib/python3.10/dist-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1727 steps_per_execution=self._steps_per_execution,
1728 )
-> 1729 val_logs = self.evaluate(
1730 x=val_x,
1731 y=val_y,
/usr/local/lib/python3.10/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
59 def error_handler(*args, **kwargs):
60 if not tf.debugging.is_traceback_filtering_enabled():
---> 61 return fn(*args, **kwargs)
62
63 filtered_tb = None
/usr/local/lib/python3.10/dist-packages/keras/engine/training.py in evaluate(self, x, y, batch_size, verbose, sample_weight, steps, callbacks, max_queue_size, workers, use_multiprocessing, return_dict, **kwargs)
2070 ):
2071 callbacks.on_test_batch_begin(step)
-> 2072 tmp_logs = self.test_function(iterator)
2073 if data_handler.should_sync:
2074 context.async_wait()
/usr/local/lib/python3.10/dist-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
139 try:
140 if not is_traceback_filtering_enabled():
--> 141 return fn(*args, **kwargs)
142 except NameError:
143 # In some very rare cases,
/usr/local/lib/python3.10/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py in __call__(self, *args, **kwds)
892
893 with OptionalXlaContext(self._jit_compile):
--> 894 result = self._call(*args, **kwds)
895
896 new_tracing_count = self.experimental_get_tracing_count()
/usr/local/lib/python3.10/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py in _call(self, *args, **kwds)
940 # This is the first call of __call__, so we have to initialize.
941 initializers = []
--> 942 self._initialize(args, kwds, add_initializers_to=initializers)
943 finally:
944 # At this point we know that the initialization is complete (or less
/usr/local/lib/python3.10/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py in _initialize(self, args, kwds, add_initializers_to)
761 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
762 self._concrete_variable_creation_fn = (
--> 763 self._variable_creation_fn # pylint: disable=protected-access
764 ._get_concrete_function_internal_garbage_collected(
765 *args, **kwds))
/usr/local/lib/python3.10/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compiler.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
169 """Returns a concrete function which cleans up its graph function."""
170 with self._lock:
--> 171 concrete_function, _ = self._maybe_define_concrete_function(args, kwargs)
172 return concrete_function
173
/usr/local/lib/python3.10/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compiler.py in _maybe_define_concrete_function(self, args, kwargs)
164 kwargs = {}
165
--> 166 return self._maybe_define_function(args, kwargs)
167
168 def _get_concrete_function_internal_garbage_collected(self, *args, **kwargs):
/usr/local/lib/python3.10/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compiler.py in _maybe_define_function(self, args, kwargs)
394 kwargs = placeholder_bound_args.kwargs
395
--> 396 concrete_function = self._create_concrete_function(
397 args, kwargs, func_graph)
398
/usr/local/lib/python3.10/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compiler.py in _create_concrete_function(self, args, kwargs, func_graph)
298
299 concrete_function = monomorphic_function.ConcreteFunction(
--> 300 func_graph_module.func_graph_from_py_func(
301 self._name,
302 self._python_function,
/usr/local/lib/python3.10/dist-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, create_placeholders, acd_record_initial_resource_uses)
1212 _, original_func = tf_decorator.unwrap(python_func)
1213
-> 1214 func_outputs = python_func(*func_args, **func_kwargs)
1215
1216 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/usr/local/lib/python3.10/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py in wrapped_fn(*args, **kwds)
665 # the function a weak reference to itself to avoid a reference cycle.
666 with OptionalXlaContext(compile_with_xla):
--> 667 out = weak_wrapped_fn().__wrapped__(*args, **kwds)
668 return out
669
/usr/local/lib/python3.10/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1198 except Exception as e: # pylint:disable=broad-except
1199 if hasattr(e, "ag_error_metadata"):
-> 1200 raise e.ag_error_metadata.to_exception(e)
1201 else:
1202 raise
/usr/local/lib/python3.10/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1187 # TODO(mdan): Push this block higher in tf.function's call stack.
1188 try:
-> 1189 return autograph.converted_call(
1190 original_func,
1191 args,
/usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py in converted_call(f, args, kwargs, caller_fn_scope, options)
437 try:
438 if kwargs is not None:
--> 439 result = converted_f(*effective_args, **kwargs)
440 else:
441 result = converted_f(*effective_args)
/usr/local/lib/python3.10/dist-packages/keras/engine/training.py in tf__test_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
/usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py in converted_call(f, args, kwargs, caller_fn_scope, options)
375
376 if not options.user_requested and conversion.is_allowlisted(f):
--> 377 return _call_unconverted(f, args, kwargs, options)
378
379 # internal_convert_user_code is for example turned off when issuing a dynamic
/usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py in _call_unconverted(f, args, kwargs, options, update_cache)
457 if kwargs is not None:
458 return f(*args, **kwargs)
--> 459 return f(*args)
460
461
/usr/local/lib/python3.10/dist-packages/keras/engine/training.py in step_function(model, iterator)
1834
1835 data = next(iterator)
-> 1836 outputs = model.distribute_strategy.run(run_step, args=(data,))
1837 outputs = reduce_per_replica(
1838 outputs,
/usr/local/lib/python3.10/dist-packages/tensorflow/python/distribute/distribute_lib.py in run(***failed resolving arguments***)
1314 fn = autograph.tf_convert(
1315 fn, autograph_ctx.control_status_ctx(), convert_by_default=False)
-> 1316 return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
1317
1318 def reduce(self, reduce_op, value, axis):
/usr/local/lib/python3.10/dist-packages/tensorflow/python/distribute/distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
2893 kwargs = {}
2894 with self._container_strategy().scope():
-> 2895 return self._call_for_each_replica(fn, args, kwargs)
2896
2897 def _call_for_each_replica(self, fn, args, kwargs):
/usr/local/lib/python3.10/dist-packages/tensorflow/python/distribute/distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
3694 def _call_for_each_replica(self, fn, args, kwargs):
3695 with ReplicaContext(self._container_strategy(), replica_id_in_sync_group=0):
-> 3696 return fn(*args, **kwargs)
3697
3698 def _reduce_to(self, reduce_op, value, destinations, options):
/usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py in wrapper(*args, **kwargs)
687 try:
688 with conversion_ctx:
--> 689 return converted_call(f, args, kwargs, options=options)
690 except Exception as e: # pylint:disable=broad-except
691 if hasattr(e, 'ag_error_metadata'):
/usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py in converted_call(f, args, kwargs, caller_fn_scope, options)
375
376 if not options.user_requested and conversion.is_allowlisted(f):
--> 377 return _call_unconverted(f, args, kwargs, options)
378
379 # internal_convert_user_code is for example turned off when issuing a dynamic
/usr/local/lib/python3.10/dist-packages/tensorflow/python/autograph/impl/api.py in _call_unconverted(f, args, kwargs, options, update_cache)
456
457 if kwargs is not None:
--> 458 return f(*args, **kwargs)
459 return f(*args)
460
/usr/local/lib/python3.10/dist-packages/keras/engine/training.py in run_step(data)
1822
1823 def run_step(data):
-> 1824 outputs = model.test_step(data)
1825 # Ensure counter is updated only if `test_step` succeeds.
1826 with tf.control_dependencies(_minimum_control_deps(outputs)):
/usr/local/lib/python3.10/dist-packages/keras/engine/training.py in test_step(self, data)
1786 x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
1787
-> 1788 y_pred = self(x, training=False)
1789 # Updates stateful loss metrics.
1790 self.compute_loss(x, y, y_pred, sample_weight)
/usr/local/lib/python3.10/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
59 def error_handler(*args, **kwargs):
60 if not tf.debugging.is_traceback_filtering_enabled():
---> 61 return fn(*args, **kwargs)
62
63 filtered_tb = None
/usr/local/lib/python3.10/dist-packages/keras/engine/training.py in __call__(self, *args, **kwargs)
556 layout_map_lib._map_subclass_model_variable(self, self._layout_map)
557
--> 558 return super().__call__(*args, **kwargs)
559
560 @doc_controls.doc_in_current_and_subclasses
/usr/local/lib/python3.10/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
59 def error_handler(*args, **kwargs):
60 if not tf.debugging.is_traceback_filtering_enabled():
---> 61 return fn(*args, **kwargs)
62
63 filtered_tb = None
/usr/local/lib/python3.10/dist-packages/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
1110 ):
1111
-> 1112 input_spec.assert_input_compatibility(
1113 self.input_spec, inputs, self.name
1114 )
/usr/local/lib/python3.10/dist-packages/keras/engine/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
217
218 if len(inputs) != len(input_spec):
--> 219 raise ValueError(
220 f'Layer "{layer_name}" expects {len(input_spec)} input(s),'
221 f" but it received {len(inputs)} input tensors. "
ValueError: in user code:
File "/usr/local/lib/python3.10/dist-packages/keras/engine/training.py", line 1852, in test_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.10/dist-packages/keras/engine/training.py", line 1836, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.10/dist-packages/tensorflow/python/distribute/distribute_lib.py", line 1316, in run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
File "/usr/local/lib/python3.10/dist-packages/tensorflow/python/distribute/distribute_lib.py", line 2895, in call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
File "/usr/local/lib/python3.10/dist-packages/tensorflow/python/distribute/distribute_lib.py", line 3696, in _call_for_each_replica
return fn(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/keras/engine/training.py", line 1824, in run_step **
outputs = model.test_step(data)
File "/usr/local/lib/python3.10/dist-packages/keras/engine/training.py", line 1788, in test_step
y_pred = self(x, training=False)
File "/usr/local/lib/python3.10/dist-packages/keras/utils/traceback_utils.py", line 61, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/keras/engine/training.py", line 558, in __call__
return super().__call__(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/keras/utils/traceback_utils.py", line 61, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/keras/engine/base_layer.py", line 1112, in __call__
input_spec.assert_input_compatibility(
File "/usr/local/lib/python3.10/dist-packages/keras/engine/input_spec.py", line 219, in assert_input_compatibility
raise ValueError(
ValueError: Layer "model_1" expects 7 input(s), but it received 1 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, 7) dtype=float64>]
2 Answers

hxzsmxv2 1#
Looking more closely at your traceback, I suspect the following:

1. Do X_train_nn and X_val_nn really have the same data shape?
2. If so, then whatever you put into validation_data, in your case in particular, somehow gets changed along the way through the many layered method calls.

I conclude this because, if you look at the traceback, the error occurs when the modified x is passed into self() or evaluate(), which I think requires the same data shape as X_train_nn.
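A quick way to check this, using the names from the question (the column-wise split below is only a sketch and assumes the validation data is reaching Keras as a single (N, 7) array, which is what the error message suggests):

import numpy as np

# A 7-input functional model expects a list (or a dict keyed by input names) of
# 7 arrays, each of shape (N, 1) or (N,), not one array of shape (N, 7).
print(type(X_Train_nn), len(X_Train_nn))
print(type(X_Val_nn), len(X_Val_nn))
print([np.asarray(a).shape for a in X_Train_nn])

# If X_Val_nn really is a single (N, 7) array, splitting it per column restores
# the structure the model expects:
X_Val_as_list = [np.asarray(X_Val_nn)[:, i:i + 1] for i in range(7)]
model.fit(X_Train_nn, y_Train_nn, epochs=50, batch_size=32,
          validation_data=(X_Val_as_list, y_Val_nn), verbose=1)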
az31mfrm 2#

First analyze your training and test data, then assign values to them, and then choose the activation function based on the requirements of your task.