2

I have CNN project on Signature Identification, but I got error like the title. Here is the code

#import lib
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
import time
#tensorflow lib
import tensorflow
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.callbacks import Callback
from keras import backend as K
from keras import optimizers
#sklearn lib
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report

#data specification
DIRECTORY = 'C:/Users/MSI GF/Pictures/DataLatih/'
CATEGORIES = ["akMundur", "akTajam", "akLembut", "caMenaik", "caMenurun", "cangkang", "coretanTengah", "garisBawah", "others"]
DATASET = []
IMG_ROWS, IMG_COLS = 224, 224
num_classes = 8
#Load DATASET and create DATASET *once exec
def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DIRECTORY,category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                #read img and preprocess
                #RGB to grayscale
                img_array = cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)
                #grayscale to threshold
                retval,img_array = cv2.threshold(img_array, 128, 1, cv2.THRESH_BINARY)
                DATASET.append([img_array,class_num])
            except Exception as e:
                pass
        print("Jumlah data: ", len(DATASET))
            
create_training_data()

print("Persiapan Data")
#split training set and test set
X = [] #features
Y = [] #labels

for features, label in DATASET:
    X.append(features)
    Y.append(label)

X = np.array(X).reshape(-1, IMG_ROWS, IMG_COLS, 1)
print("Ukuran DATASET : ", X.shape)
#split X, Y to train and test set
x_train,x_test,y_train,y_test = train_test_split(X, Y, train_size=0.4)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train/=255
x_test/=255

print("Ukuran x_train : ", x_train.shape)
print("Ukuran x_test : ", x_test.shape)
print("Ukuran y_train : ", len(y_train))
print("Ukuran y_test : ", len(y_test))

#checking image
#change dimension to plt
print("Contoh lima sampel data x_train")
x_train = np.array(x_train).reshape(-1, IMG_ROWS, IMG_COLS)
plt.figure(figsize=(10,10))
for i in range(10):
    plt.imshow(x_train[i], cmap=plt.cm.gray)
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.xlabel(CATEGORIES[y_train[i]])
plt.show()

#reshape back to use in learning
x_train = np.array(x_train).reshape(-1, IMG_ROWS, IMG_COLS, 1)
#convert class vector to binary class metrics
y_train = keras.utils.to_categorical(y_train, num_classes)
x_test = keras.utils.to_categorical(x_test, num_classes)

print("Contoh kelas : ")
print(y_train[0])
print(y_test[0])

#begin model, using ALexNet architecture
model = Sequential()
#1st Conv layer
model.add(Conv2D(filters=96, input_shape=[224,224,1], kernel_size=(11,11), strides=(4,4), padding='valid', activation='relu'))
#Max Pooling layer 1
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))

#2nd Conv Layer
model.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid', activation='relu'))
#Max Pooling layer 2
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))

#3rd Conv Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid', activation='relu'))

#4th Conv Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid', activation='relu'))

#5th Conv Layer
model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid', activation='relu'))
#Max Pooling Layer 3
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))

#passing to Fully Connected Layer
model.add(Flatten())
#1st FC Layer
model.add(Dense(4096, input_shape=(224*224*1,)))
model.add(Activation('relu'))
model.add(Dropout(0.4))

#2nd FC Layer
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.4))

#3rd FC Layer
model.add(Dense(1000))
model.add(Activation('relu'))
model.add(Dropout(0.4))

#Output layer
model.add(Dense(8))
model.add(Activation('softmax'))

model.summary()

sgd = optimizers.SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['categorical_accuracy'])

#training model
print("Training model")
start_time = time.time()
history = model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test))

print("\nTraining Model Selesai")
print("Lama waktu learning: ", (time.time() - start_time) / 60)

print(history.history.keys())
plt.figure(1)

#calculate loss and accuracy
score = model.evaluate(x_test, y_test)
print('Model telah selesai dilakukan pelatihan')
print('Test Loss : ', score[0])
print('Test Accuracy : ', score[1]*100.0)

#save model and weight
#model to json files
model_json = model.to.json()
with open("model/model_json_sgd001.json", "w") as json_file:
    json_file.write(model_json)
#weight to h5 file
model.save_weights("model/model_1_sgd001.h5")

print("Model dan Bobot telah disimpan")

the error showed in training model section. And here is the error

--------------------------------------------------------------------------- ValueError                                Traceback (most recent call last) <ipython-input-12-c5d6ddb58171> in <module>
      2 print("Training model")
      3 start_time = time.time()
----> 4 history = model.fit(x_train, y_train, epochs=100, validation_data=(x_test, y_test))
      5 
      6 print("\nTraining Model Selesai")

C:\Anaconda3\envs\PythonGPU\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps,
**kwargs)
    970                 val_x, val_y,
    971                 sample_weight=val_sample_weight,
--> 972                 batch_size=batch_size)
    973             if self._uses_dynamic_learning_phase():
    974                 val_ins = val_x + val_y + val_sample_weights + [0.]

C:\Anaconda3\envs\PythonGPU\lib\site-packages\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
    749             feed_input_shapes,
    750             check_batch_axis=False,  # Don't enforce the batch size.
--> 751             exception_prefix='input')
    752 
    753         if y is not None:

C:\Anaconda3\envs\PythonGPU\lib\site-packages\keras\engine\training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
    136                             ': expected ' + names[i] + ' to have shape ' +
    137                             str(shape) + ' but got array with shape ' +
--> 138                             str(data_shape))
    139     return data
    140 

ValueError: Error when checking input: expected conv2d_1_input to have shape (224, 224, 1) but got array with shape (224, 224, 8)

I assumed this is because my input has 8 channel instead of 1, but how to fix it?

1 Answer 1

2

Seems like you one-hot encoded your input tensor instead of the labels:

x_test = keras.utils.to_categorical(x_test, num_classes)

I think it should be y_test

Sign up to request clarification or add additional context in comments.

4 Comments

Oh I must be blind because i didnt see that one, thank you, but now I face another error --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-14-132e085da67b> in <module> 1 #save model and weight 2 #model to json files ----> 3 model_json = model.to.json() 4 with open("model/model_json_sgd001.json", "w") as json_file: 5 json_file.write(model_json) AttributeError: 'Sequential' object has no attribute 'to'
you wrote to.json somewhere instead of to_json. but it's best to ask only one question per thread so i would encourage you to ask any new question in a new thread
so i should make a new question? i'm new in communication using tensorflow
you should make 1 question for 1 issue. but i answered your second answer already. if you have another one after the problem with json, it would be better to ask it in another thread, and mark this question as solved if it solved your problem

Start asking to get answers

Find the answer to your question by asking.

Ask question

Explore related questions

See similar questions with these tags.