TensorFlow Generator
BESSER provides a code generator for TensorFlow, which is a popular open-source library for deep learning. This generator transforms B-UML Neural Network models into TensorFlow code, allowing you to create neural networks based on your B-UML specifications.
To use the TensorFlow generator, you need to create a TFGenerator object, provide the B-UML Neural Network model, and call the generate method as follows:
from besser.generators.nn.tf.tf_code_generator import TFGenerator

tf_model = TFGenerator(
    model=nn_model, output_dir="output_folder", generation_type="subclassing"
)
tf_model.generate()
The configuration parameters for the TFGenerator are as follows:

- model: The neural network model.
- output_dir: The name of the output directory where the tf_nn_subclassing.py file will be generated.
- generation_type: The type of NN architecture, either subclassing or sequential (a sequential sketch follows this list).
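For example, to generate the sequential variant, only the generation_type argument changes. A minimal sketch, reusing the same nn_model and output directory as above:

from besser.generators.nn.tf.tf_code_generator import TFGenerator

# Same B-UML model; request the sequential architecture instead
tf_model = TFGenerator(
    model=nn_model, output_dir="output_folder", generation_type="sequential"
)
tf_model.generate()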
For the subclassing example above, the tf_nn_subclassing.py file will be generated inside output_folder and will look as follows:
1"""TensorFlow code generated based on BUML."""
2
3import tensorflow as tf
4from keras import layers
5
6
7from datetime import datetime
8from sklearn.metrics import classification_report
9
10from besser.generators.nn.utils_nn import compute_mean_std
11
12
13# Define the network architecture
14class NeuralNetwork(tf.keras.Model):
15 def __init__(self):
16 super().__init__()
17 self.l1 = layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu')
18 self.l2 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')
19 self.l3 = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu')
20 self.l4 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')
21 self.l5 = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu')
22 self.l6 = layers.Flatten()
23 self.l7 = layers.Dense(units=64, activation='relu')
24 self.l8 = layers.Dense(units=10, activation=None)
25
26
27 def call(self, x):
28 x = self.l1(x)
29 x = self.l2(x)
30 x = self.l3(x)
31 x = self.l4(x)
32 x = self.l5(x)
33 x = self.l6(x)
34 x = self.l7(x)
35 x = self.l8(x)
36 return x
37
38
39
40
41# Dataset preparation
42IMAGE_SIZE = (32, 32)
43
44# Function to load and preprocess images
45scale, _, _ = compute_mean_std("dataset/cifar10/train", num_samples=100,
46 target_size=IMAGE_SIZE)
47def preprocess_image(image, label, to_scale):
48 if to_scale:
49 image = tf.cast(image, tf.float32) / 255.0
50 return image, label
51
52
53# Load dataset (resizes by default)
54def load_dataset(directory, mode, image_size):
55 dataset = tf.keras.preprocessing.image_dataset_from_directory(
56 directory=directory,
57 label_mode="int",
58 image_size=image_size,
59 batch_size=32,
60 shuffle=True if mode == 'train' else False,
61 )
62 # Apply preprocessing
63 dataset = dataset.map(
64 lambda image, label: preprocess_image(image, label, scale))
65 # Prefetch for performance optimization
66 AUTOTUNE = tf.data.AUTOTUNE
67 dataset = dataset.prefetch(buffer_size=AUTOTUNE)
68 return dataset
69
70# Load datasets
71train_loader = load_dataset("dataset/cifar10/train", "train", IMAGE_SIZE)
72test_loader = load_dataset("dataset/cifar10/test", "test", IMAGE_SIZE)
73
74
75# Define the network, loss function, and optimizer
76my_model = NeuralNetwork()
77criterion = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
78
79optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
80
81# Train the neural network
82print('##### Training the model')
83for epoch in range(10):
84 # Initialize the running loss for the current epoch
85 running_loss = 0.0
86 total_loss = 0.0
87 # Iterate over mini-batches of training data
88 for i, (inputs, labels) in enumerate(train_loader):
89 with tf.GradientTape() as tape:
90 outputs = my_model(inputs, training=True)
91 # Convert labels to one-hot encoding
92 if labels.shape.rank > 1 and labels.shape[-1] == 1:
93 labels = tf.squeeze(labels, axis=-1)
94 labels = tf.cast(labels, dtype=tf.int32)
95 labels = tf.one_hot(labels, depth=10
96 )
97 loss = criterion(labels, outputs)
98 # Compute gradients and update model parameters
99 gradients = tape.gradient(loss, my_model.trainable_variables)
100 optimizer.apply_gradients(
101 zip(gradients, my_model.trainable_variables))
102 total_loss += loss.numpy()
103 running_loss += loss.numpy()
104 if i % 200 == 199: # Print every 200 mini-batches
105 print(
106 f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 200:.3f}"
107 )
108 running_loss = 0.0
109 print(
110 f"[{epoch + 1}] overall loss for epoch: "
111 f"{total_loss / len(train_loader):.3f}"
112 )
113 total_loss = 0.0
114print('Training finished')
115
116# Evaluate the neural network
117print('##### Evaluating the model')
118predicted_labels = []
119true_labels = []
120test_loss = 0.0
121
122for inputs, labels in test_loader:
123 outputs = my_model(inputs, training=False)
124 true_labels.extend(labels.numpy())
125 predicted = tf.argmax(outputs, axis=-1).numpy()
126 if labels.shape.rank > 1 and labels.shape[-1] == 1:
127 labels = tf.squeeze(labels, axis=-1)
128 labels = tf.cast(labels, dtype=tf.int32)
129 labels = tf.one_hot(labels, depth=10
130 )
131 predicted_labels.extend(predicted)
132 test_loss += criterion(labels, outputs).numpy()
133
134
135average_loss = test_loss / len(test_loader)
136print(f"Test Loss: {average_loss:.3f}")
137
138# Calculate the metrics
139metrics = ['f1-score']
140report = classification_report(true_labels, predicted_labels,
141 output_dict=True)
142for metric in metrics:
143 metric_list = []
144 for class_label in report.keys():
145 if class_label not in ('macro avg', 'weighted avg', 'accuracy'):
146 print(f"{metric.capitalize()} for class {class_label}:",
147 report[class_label][metric])
148 metric_list.append(report[class_label][metric])
149 metric_value = sum(metric_list) / len(metric_list)
150 print(f"Average {metric.capitalize()}: {metric_value:.2f}")
151 print(f"Accuracy: {report['accuracy']}")
152
153
154# Save the neural network
155print('##### Saving the model')
156my_model.save(f"my_model_{datetime.now}")
157print("The model is saved successfully")