Input 0 is incompatible with layer when using dataset.batch
Question
I have the following code, which runs fine.
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Input, Dense, Reshape, Dropout, \
    BatchNormalization, Activation, Conv2D, Conv2DTranspose, LeakyReLU
from tensorflow.keras.models import Model

AUTOTUNE = tf.data.experimental.AUTOTUNE

HEIGHT = 39
WIDTH = 39
CHANNELS = 2
SCALE_FACTOR = 4
VAL_SPLIT = 0.1
TRAIN_SPLIT = 0.8
TEST_SPLIT = 0.1
SEED = 1
BUFFER_SIZE = 100
LR = 1e-4
BATCH_SIZE = 2
INP_LOW = (HEIGHT, WIDTH, CHANNELS)

# Let TensorFlow allocate GPU memory on demand.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

def resize_and_rescale(low, high):
    # Resize the high-resolution target down to the model's output size.
    high = tf.image.resize(high,
                           (HEIGHT, WIDTH),
                           preserve_aspect_ratio=False)
    return low, high

def split_train_test_val(ds,
                         seed,
                         train_split=TRAIN_SPLIT,
                         val_split=VAL_SPLIT,
                         test_split=TEST_SPLIT,
                         shuffle=True,
                         shuffle_size=BUFFER_SIZE):
    assert (train_split + test_split + val_split) == 1
    ds_size = len(ds)
    if shuffle:
        # Shuffle once with a fixed seed so the three splits don't overlap.
        ds = ds.shuffle(shuffle_size,
                        reshuffle_each_iteration=False,
                        seed=seed)
    train_size = int(train_split * ds_size)
    val_size = int(val_split * ds_size)
    test_size = int(test_split * ds_size)
    train_ds = ds.take(train_size)
    test_ds = ds.skip(train_size)
    val_ds = test_ds.skip(test_size)
    test_ds = test_ds.take(test_size)
    return train_ds, val_ds, test_ds

def prepare(ds, shuffle=False):
    ds = ds.map(resize_and_rescale, num_parallel_calls=AUTOTUNE)
    ds = ds.cache()
    if shuffle:
        ds = ds.shuffle(buffer_size=BUFFER_SIZE)
    # ds = ds.batch(BATCH_SIZE)
    # ds = ds.prefetch(buffer_size=AUTOTUNE)
    return ds

def data_gen(low_res, high_res):
    dataset_low = tf.data.Dataset.from_tensor_slices(low_res)
    dataset_high = tf.data.Dataset.from_tensor_slices(high_res)
    dataset = tf.data.Dataset.zip((dataset_low, dataset_high))
    train_ds, val_ds, test_ds = split_train_test_val(dataset,
                                                     SEED,
                                                     train_split=TRAIN_SPLIT,
                                                     val_split=VAL_SPLIT,
                                                     test_split=TEST_SPLIT,
                                                     shuffle=True,
                                                     shuffle_size=BUFFER_SIZE)
    train_ds = prepare(train_ds, shuffle=True)
    val_ds = prepare(val_ds)
    test_ds = prepare(test_ds)
    return train_ds, val_ds, test_ds

def build_model(lr):
    # `lr` is the input shape, e.g. (HEIGHT, WIDTH, CHANNELS).
    inp = Input(lr)
    x = Dense(16)(inp)
    x = Conv2DTranspose(CHANNELS, kernel_size=3, strides=1, padding='same')(x)
    output = Activation('tanh')(x)
    model = Model(inp, output)
    return model

low = np.load('low.npy')
high = np.load('high.npy')
train_ds, val_ds, test_ds = data_gen(low, high)

model = build_model(INP_LOW)
model.compile(loss=['mse'],
              optimizer=tf.keras.optimizers.Adam(learning_rate=LR))

# Materialise the whole (unbatched) training split as two tensors.
train_low, train_high = tf.data.experimental.get_single_element(train_ds.batch(len(train_ds)))
history = model.fit(train_low,
                    train_high,
                    epochs=2,
                    batch_size=BATCH_SIZE)
But when I try to use:

ds = ds.batch(BATCH_SIZE)
ds = ds.prefetch(buffer_size=AUTOTUNE)

in the prepare function and, at the same time, omit the batch_size in fit:

history = model.fit(train_low,
                    train_high,
                    epochs=2)
I am receiving:
ValueError: Input 0 is incompatible with layer model_2: expected shape=(None, 39, 39, 2), found shape=(32, 2, 39, 39, 2)
You can find the data here
Since I removed the batch size from fit, I would expect this to work.
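For reference, with the batched version of prepare each element of train_ds is already a full batch rather than a single sample. A quick check (the printed shapes assume the high-resolution targets also end up with CHANNELS channels):

# Each dataset element is now a (low, high) batch pair.
# With BATCH_SIZE = 2, each low batch has shape (2, 39, 39, 2),
# so the static spec reports (None, 39, 39, 2) per component.
print(train_ds.element_spec)
# (TensorSpec(shape=(None, 39, 39, 2), ...),
#  TensorSpec(shape=(None, 39, 39, 2), ...))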
Answer 1
Score: 1
When using a tf.data.Dataset in model.fit, you should provide only the x argument of model.fit, with the assumption that your tf.data.Dataset returns a tuple (input_features, targets).

You can read more in the documentation of keras.Model.fit. Here's an excerpt with the relevant info:
> Args
>
> - x: Input data. It could be:
>   - A tf.data dataset. Should return a tuple of either (inputs, targets) or (inputs, targets, sample_weights).
>
> - y: If x is a dataset, generator, or keras.utils.Sequence instance, y should not be specified (since targets will be obtained from x).
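That is also where the reported shape comes from: with ds.batch(BATCH_SIZE) enabled in prepare, train_ds.batch(len(train_ds)) batches an already-batched dataset, so get_single_element returns 5-D tensors, and model.fit (whose default batch_size is 32) slices them once more along the first axis. A minimal sketch of the effect, assuming the batched prepare and an evenly divisible train split:

# train_ds already yields (low, high) batches of shape (2, 39, 39, 2).
# Batching again stacks whole batches along a new leading axis:
stacked = train_ds.batch(len(train_ds))
low_all, high_all = tf.data.experimental.get_single_element(stacked)
print(low_all.shape)  # (n_batches, 2, 39, 39, 2) - five dimensions
# model.fit(low_all, high_all) then re-batches along axis 0 with its
# default batch_size of 32, which is the (32, 2, 39, 39, 2) in the error.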
Given that train_low holds your input features and train_high holds your targets (that is how data_gen zips them), you should simply call model.fit(train_ds, epochs=2) and skip the line

train_low, train_high = tf.data.experimental.get_single_element(train_ds.batch(len(train_ds)))
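Putting it together, a minimal sketch of the corrected training step, keeping the batched prepare from the question (passing val_ds as validation_data is optional and added here for illustration):

def prepare(ds, shuffle=False):
    ds = ds.map(resize_and_rescale, num_parallel_calls=AUTOTUNE)
    ds = ds.cache()
    if shuffle:
        ds = ds.shuffle(buffer_size=BUFFER_SIZE)
    ds = ds.batch(BATCH_SIZE)              # batch exactly once, here
    ds = ds.prefetch(buffer_size=AUTOTUNE)
    return ds

train_ds, val_ds, test_ds = data_gen(low, high)
model = build_model(INP_LOW)
model.compile(loss=['mse'],
              optimizer=tf.keras.optimizers.Adam(learning_rate=LR))

# The dataset yields (inputs, targets) batches, so pass it as x alone:
# no y, no batch_size, and no get_single_element round trip.
history = model.fit(train_ds, epochs=2, validation_data=val_ds)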