Is there a way to reduce the expected dimensions of conv2d_Conv2D1_input from 4 to 3?

Problem:

  • A ValueError says that conv2d_Conv2D1_input is expected to have 4 dimension(s), but got an array with shape [475,475,3]

However:

  • The inputShape is set to [475,475,3]
  • When logged, the decoded image tensors have the shape [475,475,3] (see the snippet below)
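
For reference, here is a minimal standalone sketch of how the two shapes compare (it decodes one of the images from the complete code below and rebuilds just the first layer; the model.inputs check is my own addition). If I understand the layers API correctly, inputShape excludes the batch dimension, so the layer's actual input is rank 4:

const tf = require('@tensorflow/tfjs');
const tfnode = require('@tensorflow/tfjs-node');
const fs = require('fs');

// Decode one image and log its shape: prints [ 475, 475, 3 ] (rank 3).
const image = tfnode.node.decodeImage(fs.readFileSync('./1.png'), 3);
console.log(image.shape);

// Build the same first layer and log the shape the model actually expects.
// Prints [ null, 475, 475, 3 ]: rank 4, with null standing for the batch size.
const model = tf.sequential();
model.add(tf.layers.conv2d({
  inputShape: [475, 475, 3],
  filters: 32,
  kernelSize: 3,
  activation: 'relu',
}));
console.log(model.inputs[0].shape);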

Error: ValueError: Error when checking : expected conv2d_Conv2D1_input to have 4 dimension(s), but got array with shape [475,475,3]

Logged tensor (output of console.log(tensorFeature)):

Tensor {
  kept: false,
  isDisposedInternal: false,
  shape: [ 475, 475, 3 ],
  dtype: 'int32',
  size: 676875,
  strides: [ 1425, 3 ],
  dataId: {},
  id: 8,
  rankType: '3',
  scopeId: 4
}
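
Since tf.stack(tensorFeatures) in the training call already produces a rank-4 tensor, the error appears to come from the final model.predict(im) call, where im is a single rank-3 image. A minimal sketch of the difference, reusing the image files from the complete code below:

const tf = require('@tensorflow/tfjs');
const tfnode = require('@tensorflow/tfjs-node');
const fs = require('fs');

const load = (file) => tfnode.node.decodeImage(fs.readFileSync(file), 3);

const single = load('./2.png');
const batch = tf.stack([load('./1.png'), load('./4.png'), load('./7.png')]);

console.log(single.shape); // [ 475, 475, 3 ]    -> rank 3, what predict() receives
console.log(batch.shape);  // [ 3, 475, 475, 3 ] -> rank 4, what fit() receives after tf.stack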

Complete Code:

const tf = require('@tensorflow/tfjs');
const tfnode = require('@tensorflow/tfjs-node');
const fs = require('fs');

const main = async () => {
  // Read an image file and decode it into a rank-3 tensor ([height, width, channels]).
  // fs.readFileSync and tf.node.decodeImage are synchronous, so no await is needed here.
  const loadImage = async (file) => {
    const imageBuffer = fs.readFileSync(file)
    const tensorFeature = tfnode.node.decodeImage(imageBuffer, 3)
    return tensorFeature;
  }

  const tensorFeature = await loadImage(`./1.png`)
  const tensorFeature2 = await loadImage(`./4.png`)
  const tensorFeature3 = await loadImage(`./7.png`)

  console.log(tensorFeature)
  console.log(tensorFeature2)
  console.log(tensorFeature3)

  // Collect the three decoded images and one-hot encode the three class labels.
  const tensorFeatures = [tensorFeature, tensorFeature2, tensorFeature3]

  const labelArray = [0, 1, 2]
  const tensorLabels = tf.oneHot(tf.tensor1d(labelArray, 'int32'), 3);

  const model = tf.sequential();
  model.add(tf.layers.conv2d({
    inputShape: [475, 475, 3],
    filters: 32,
    kernelSize: 3,
    activation: 'relu',
  }));
  model.add(tf.layers.flatten());
  model.add(tf.layers.dense({units: 3, activation: 'softmax'}));

  model.compile({
    optimizer: 'sgd',
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy']
  });

  model.summary()

  // tf.stack adds a batch dimension, so the training input has shape [3, 475, 475, 3].
  await model.fit(tf.stack(tensorFeatures), tensorLabels)

  // im is a single decoded image with shape [475, 475, 3] (rank 3) at this point.
  const im = await loadImage(`./2.png`)
  model.predict(im)
}
main()
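
In case it helps frame the question: my understanding is that the usual workaround goes in the opposite direction, i.e. giving the single prediction image a batch dimension with expandDims instead of reducing what the layer expects. A sketch of what the last lines of main() would look like under that assumption (expandDims(0) is my addition, not part of the original code):

  // Assumption: add a batch dimension so the shape becomes [1, 475, 475, 3].
  const im = await loadImage(`./2.png`)
  const batched = im.expandDims(0)   // [475, 475, 3] -> [1, 475, 475, 3]
  model.predict(batched)

What I would still like to know is whether the layer itself can be configured to accept a rank-3 input directly.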