Final script

Here is the full example of a Picsellia integration. If you want a detailed explanation, you can start here.

from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow import keras
import tensorflow as tf
import tensorflow_datasets as tfds
from picsellia import Client, Experiment
from picsellia.types.enums import LogType, InferenceType
import os
import numpy as np 

## Client initialization
print("Initializing Picsellia Client 🥑")

client = Client(
  api_token="XXXXXXX", 
  organization_name="my_organization",
  host="https://app.picsellia.com"
)
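
# Tip: avoid hardcoding the token in scripts; it can be read from an environment
# variable instead (the variable name below is just an illustrative choice):
#   client = Client(api_token=os.environ["PICSELLIA_TOKEN"], ...)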

## Retrieve the experiment to train
project = client.get_project("documentation_project")
experiment = project.get_experiment("exp-0-documentation")


## Retrieve & download datasets
datasets = experiment.list_attached_dataset_versions()
print("Downloading Images for datasets ...")

for dataset in datasets:
    for label in dataset.list_labels():
        # One folder per label, under the dataset version name ('train'/'test').
        target_dir = os.path.join(dataset.version, label.name)
        os.makedirs(target_dir, exist_ok=True)
        dataset.list_assets(
            q=f'annotations.classifications.label.name = "{label.name}"'
        ).download(target_dir)

print("Downloading Images for datasets ... ✅")


## Retrieve and download model weights
print("Downloading Weights for Model ... ")

base_model = experiment.get_base_model_version()
base_model_weights = base_model.get_file('weights')
base_model_weights.download()

print("Downloading Weights for Model ... ✅")

## Retrieve experiment training parameters
parameters = experiment.get_log(name='parameters').data
IMG_SIZE = parameters['image_size']
batch_size = parameters['batch_size']
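# `parameters` is whatever was logged on the experiment under the name 'parameters',
# e.g. {"image_size": 224, "batch_size": 32, "epochs": 10} (illustrative values).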

## Hardware
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
    print("Device:", tpu.master())
    strategy = tf.distribute.TPUStrategy(tpu)
except ValueError:
    print("Not connected to a TPU runtime. Using CPU/GPU strategy")
    strategy = tf.distribute.MirroredStrategy()
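    # MirroredStrategy also runs fine on a single GPU or CPU-only machine, so this
    # fallback covers every non-TPU case without further branching.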


## Preprocessing
size = (IMG_SIZE, IMG_SIZE)

# Note: keras.applications EfficientNet normalizes inputs internally (it expects raw [0, 255] pixels), so no rescale is applied.
train_img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rotation_range=20)
test_img_gen = tf.keras.preprocessing.image.ImageDataGenerator()
# Keep the iterators themselves (no next()): fit()/predict() consume them batch
# by batch, and class_indices/filenames are needed later for evaluation.
ds_train = train_img_gen.flow_from_directory('train', target_size=size, batch_size=batch_size, seed=42)
# shuffle=False keeps predictions aligned with ds_test.filenames.
ds_test = test_img_gen.flow_from_directory('test', target_size=size, batch_size=batch_size, shuffle=False)


NUM_CLASSES = len(experiment.get_dataset('train').list_labels())

img_augmentation = Sequential(
    [
        layers.RandomRotation(factor=0.15),
        layers.RandomTranslation(height_factor=0.1, width_factor=0.1),
        layers.RandomFlip(),
        layers.RandomContrast(factor=0.1),
    ],
    name="img_augmentation",
)
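
# These preprocessing layers are only active during training; model.predict()
# passes images through them unchanged.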

print("Dataloader Instantiated")

def build_model(num_classes):
    inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
    x = img_augmentation(inputs)
    model = EfficientNetB0(include_top=False, input_tensor=x, weights="imagenet")

    # Freeze the pretrained weights
    model.trainable = False

    # Rebuild top
    x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
    x = layers.BatchNormalization()(x)

    top_dropout_rate = 0.2
    x = layers.Dropout(top_dropout_rate, name="top_dropout")(x)
    outputs = layers.Dense(num_classes, activation="softmax", name="pred")(x)

    # Compile
    model = tf.keras.Model(inputs, outputs, name="EfficientNet")
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
    model.compile(
        optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
    )
    return model

with strategy.scope():
    model = build_model(num_classes=NUM_CLASSES)
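    # Building and compiling inside strategy.scope() ensures the model's variables
    # are created on, and mirrored across, the devices selected above.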

# Initializing Callbacks
class CustomPicselliaCallback(keras.callbacks.Callback):

    def __init__(self, experiment: Experiment) -> None:
        super().__init__()
        self.experiment = experiment

    def on_epoch_end(self, epoch, logs=None):
        # Keras passes scalar metrics in `logs`; append each one to a Picsellia
        # LINE log so the training curves build up epoch by epoch.
        for k, v in (logs or {}).items():
            try:
                self.experiment.log(k, float(v), LogType.LINE)
            except Exception as e:
                print(f"can't send {k}={v}: {e}")

picsellia_callback = CustomPicselliaCallback(experiment=experiment)

"""
epochs = parameters['epochs']
hist = model.fit(ds_train, epochs=epochs, validation_data=ds_test, verbose=2, callbacks=[picsellia_callback])

def unfreeze_model(model):
    # We unfreeze the top 20 layers while leaving BatchNorm layers frozen
    for layer in model.layers[-20:]:
        if not isinstance(layer, layers.BatchNormalization):
            layer.trainable = True

    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
    model.compile(
        optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
    )


unfreeze_model(model)
"""

epochs = parameters['epochs']
hist = model.fit(ds_train, epochs=epochs, validation_data=ds_test, verbose=2, callbacks=[picsellia_callback])
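# hist.history holds the same per-epoch metrics that the callback streamed to
# Picsellia, if you want to inspect them locally as well.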

## Compute & log evaluations
prediction = model.predict(ds_test)

picsellia_ds_test = experiment.get_dataset('test')
labels = list(ds_test.class_indices.keys())
labels_picsellia = {k: picsellia_ds_test.get_label(k) for k in labels}

for i, pred in enumerate(prediction):
    fname = os.path.basename(ds_test.filenames[i])
    asset = picsellia_ds_test.find_asset(filename=fname)
    conf_score = float(np.max(pred))
    class_name = labels[np.argmax(pred)]
    picsellia_label = labels_picsellia[class_name]
    experiment.add_evaluation(asset, classifications=[(picsellia_label, conf_score)])

job = experiment.compute_evaluations_metrics(InferenceType.CLASSIFICATION)
job.wait_for_done()
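# compute_evaluations_metrics launches a server-side job on Picsellia;
# wait_for_done() blocks until the computed metrics are available.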
   
## Store the generated model files
model.save('weights.h5', save_format='h5')
experiment.store(name="model-weights", path='weights.h5')
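
# Later, e.g. from another script, the stored weights can be fetched back from the
# experiment (a sketch; get_artifact mirrors store() in recent SDK versions, so
# adjust if yours differs):
#   artifact = experiment.get_artifact("model-weights")
#   artifact.download()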