Tuesday, 20 July 2021

How can I implement multi-task deep learning in Keras?

Check the source code in replit.

I have 3 classes (A, B, and C).

I have 6 features:

train_x = [[ 6.442  6.338  7.027  8.789 10.009 12.566]
           [ 6.338  7.027  5.338 10.009  8.122 11.217]
           [ 7.027  5.338  5.335  8.122  5.537  6.408]
           [ 5.338  5.335  5.659  5.537  5.241  7.043]]

These features represent a 5-character string pattern comprising 3 classes (e.g. AABBC, etc.).

Suppose the 5-character string pattern is one-hot encoded as follows:

train_z = [[0. 0. 1. 0. 0. 1. 0. 0. 1. 0. 0. 1. 1. 0. 0.]    
           [0. 0. 1. 0. 0. 1. 0. 0. 1. 1. 0. 0. 1. 0. 0.]
           [0. 0. 1. 0. 0. 1. 1. 0. 0. 1. 0. 0. 1. 0. 0.]    
           [0. 0. 1. 1. 0. 0. 1. 0. 0. 1. 0. 0. 0. 0. 1.]]

I think, this is a Multi-task learning problem.

So, I wrote the following source code:

    # there would be 6 inputs for 6 features
inputs_tensor = keras.Input(shape=(FEATURES_COUNT,))

# there would be 2 hidden layers
hidden_layer_1 = keras.layers.Dense(LAYER_1_NEURON_COUNT, activation="relu")
hidden_layer_2 = keras.layers.Dense(LAYER_2_NEURON_COUNT, activation="relu")

# there would be 15 outputs for 15-bits
# each o/p layer will have 1 neurons for binary data
output_layer_1 = keras.layers.Dense(1, activation='sigmoid')  # 1 neuraons for 1 output
output_layer_2 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_3 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_4 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_5 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_6 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_7 = keras.layers.Dense(1, activation='sigmoid')  # 1 neuraons for 1 output
output_layer_8 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_9 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_10 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_11 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_12 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_13 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_14 = keras.layers.Dense(1, activation='sigmoid')  # -do-
output_layer_15 = keras.layers.Dense(1, activation='sigmoid')  # -do-

# assembling the layers.
x = hidden_layer_1(inputs_tensor)
x = hidden_layer_2(x)
# configuring the output
output1 = output_layer_1(x)
output2 = output_layer_2(x)
output3 = output_layer_3(x)
output4 = output_layer_4(x)
output5 = output_layer_5(x)
output6 = output_layer_6(x)
output7 = output_layer_7(x)
output8 = output_layer_8(x)
output9 = output_layer_9(x)
output10 = output_layer_10(x)
output11 = output_layer_11(x)
output12 = output_layer_12(x)
output13 = output_layer_13(x)
output14 = output_layer_14(x)
output15 = output_layer_15(x)

model = keras.Model(inputs=[inputs_tensor],
                    outputs=[output1, output2, output3, output4, output5,
                             output6, output7, output8, output9, output10,
                             output11, output12, output13, output14, output15],
                    name="functional_model")

model.summary()
print("Inputs count : ", model.inputs)
print("Outputs count : ", len(model.outputs))

opt_function = keras.optimizers.SGD(lr=0.01, decay=1e-1, momentum=0.9, nesterov=True)
#
model.compile(loss='binary_crossentropy',
               optimizer=opt_function,
              metrics=['accuracy'])
#

print(train_x,"\n",train_z)

model.fit(
    train_x, train_z,
    epochs=EPOCHS,
    batch_size=BATCH_SIZE
)

Generates errors:

Traceback (most recent call last):
  File "C:/Users/pc/source/repos/OneHotEncodingLayer__test/ny_nn___k_15_outputs.py", line 117, in <module>
    model.fit(
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\engine\training.py", line 108, in _method_wrapper
    return method(self, *args, **kwargs)
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1098, in fit
    tmp_logs = train_function(iterator)
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\eager\def_function.py", line 780, in __call__
    result = self._call(*args, **kwds)
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\eager\def_function.py", line 823, in _call
    self._initialize(args, kwds, add_initializers_to=initializers)
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\eager\def_function.py", line 696, in _initialize
    self._stateful_fn._get_concrete_function_internal_garbage_collected(  # pylint: disable=protected-access
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\eager\function.py", line 2855, in _get_concrete_function_internal_garbage_collected
    graph_function, _, _ = self._maybe_define_function(args, kwargs)
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\eager\function.py", line 3213, in _maybe_define_function
    graph_function = self._create_graph_function(args, kwargs)
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\eager\function.py", line 3065, in _create_graph_function
    func_graph_module.func_graph_from_py_func(
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\framework\func_graph.py", line 986, in func_graph_from_py_func
    func_outputs = python_func(*func_args, **func_kwargs)
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\eager\def_function.py", line 600, in wrapped_fn
    return weak_wrapped_fn().__wrapped__(*args, **kwds)
  File "C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\framework\func_graph.py", line 973, in wrapper
    raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:

    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function  *
        return step_function(self, iterator)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
        return fn(*args, **kwargs)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step  **
        outputs = model.train_step(data)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\engine\training.py:748 train_step
        loss = self.compiled_loss(
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\engine\compile_utils.py:204 __call__
        loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\losses.py:149 __call__
        losses = ag_call(y_true, y_pred)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\losses.py:253 call  **
        return ag_fn(y_true, y_pred, **self._fn_kwargs)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
        return target(*args, **kwargs)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\losses.py:1605 binary_crossentropy
        K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
        return target(*args, **kwargs)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\keras\backend.py:4823 binary_crossentropy
        return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
        return target(*args, **kwargs)
    C:\ProgramData\Miniconda3\envs\by_nn\lib\site-packages\tensorflow\python\ops\nn_impl.py:173 sigmoid_cross_entropy_with_logits
        raise ValueError("logits and labels must have the same shape (%s vs %s)" %

    ValueError: logits and labels must have the same shape ((1, 1) vs (1, 15))


Process finished with exit code 1
  1. Is my implementation correct? If NOT, how can I correct the implementation?
  2. How can I resolve the errors?


from How can implement a multi-task deep learning in Keras?

No comments:

Post a Comment