diff --git a/.travis.yml b/.travis.yml index 90528940..bebf670a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,8 +3,8 @@ python: - '3.6' install: - pip install -r src/requirements.txt -- pip install tensorflow==1.13.1 -- pip install mkdocs==1.0.4 mkdocs-material==4.3.0 +- pip install tensorflow==2.0.* +- pip install mkdocs mkdocs-material script: - nosetests -vs src/tests - cd mkdocs && sh build_docs.sh diff --git a/Dockerfile.cpu b/Dockerfile.cpu index 584c03e6..cb59d52c 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -1,4 +1,4 @@ -FROM tensorflow/tensorflow:latest-py3 +FROM tensorflow/tensorflow:2.0.0-py3 # Install system packages RUN apt-get update && apt-get install -y --no-install-recommends \ diff --git a/Dockerfile.gpu b/Dockerfile.gpu index 6a3bbccb..19c2d027 100644 --- a/Dockerfile.gpu +++ b/Dockerfile.gpu @@ -1,4 +1,4 @@ -FROM tensorflow/tensorflow:latest-gpu-py3 +FROM tensorflow/tensorflow:2.0.0-gpu-py3 # Install system packages RUN apt-get update && apt-get install -y --no-install-recommends \ diff --git a/README.md b/README.md index 98c2069e..7fbde4c2 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # Image Quality Assessment [![Build Status](https://travis-ci.org/idealo/image-quality-assessment.svg?branch=master)](https://travis-ci.org/idealo/image-quality-assessment) +[![Docs](https://img.shields.io/badge/docs-online-brightgreen)](https://idealo.github.io/image-quality-assessment/) [![License](https://img.shields.io/badge/License-Apache%202.0-orange.svg)](https://github.com/idealo/image-quality-assessment/blob/master/LICENSE) This repository provides an implementation of an aesthetic and technical image quality model based on Google's research paper ["NIMA: Neural Image Assessment"](https://arxiv.org/pdf/1709.05424.pdf). You can find a quick introduction on their [Research Blog](https://research.googleblog.com/2017/12/introducing-nima-neural-image-assessment.html). @@ -43,37 +44,39 @@ MobileNet technical | TID2013 | 0.107 |0.652|0.675 ## Getting started -1. Install [Docker](https://docs.docker.com/install/) +1. Install [jq](https://stedolan.github.io/jq/download/) -2. Build docker image `docker build -t nima-cpu . -f Dockerfile.cpu` +2. Install [Docker](https://docs.docker.com/install/) + +3. Build docker image `docker build -t nima-cpu . -f Dockerfile.cpu` In order to train remotely on **AWS EC2** -3. Install [Docker Machine](https://docs.docker.com/machine/install-machine/) +4. Install [Docker Machine](https://docs.docker.com/machine/install-machine/) -4. Install [AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/userguide/installing.html) +5. Install [AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/userguide/installing.html) ## Predict In order to run predictions on an image or batch of images you can run the prediction script 1. Single image file -``` -./predict \ ---docker-image nima-cpu \ ---base-model-name MobileNet \ ---weights-file $(pwd)/models/MobileNet/weights_mobilenet_technical_0.11.hdf5 \ ---image-source $(pwd)/src/tests/test_images/42039.jpg -``` + ```bash + ./predict \ + --docker-image nima-cpu \ + --base-model-name MobileNet \ + --weights-file $(pwd)/models/MobileNet/weights_mobilenet_technical_0.11.hdf5 \ + --image-source $(pwd)/src/tests/test_images/42039.jpg + ``` 2. 
All image files in a directory -``` -./predict \ ---docker-image nima-cpu \ ---base-model-name MobileNet \ ---weights-file $(pwd)/models/MobileNet/weights_mobilenet_technical_0.11.hdf5 \ ---image-source $(pwd)/src/tests/test_images -``` + ```bash + ./predict \ + --docker-image nima-cpu \ + --base-model-name MobileNet \ + --weights-file $(pwd)/models/MobileNet/weights_mobilenet_technical_0.11.hdf5 \ + --image-source $(pwd)/src/tests/test_images + ``` ## Train locally on CPU @@ -81,87 +84,85 @@ In order to run predictions on an image or batch of images you can run the predi 1. Download dataset (see instructions under [Datasets](#datasets)) 2. Run the local training script (e.g. for TID2013 dataset) -``` -./train-local \ ---config-file $(pwd)/models/MobileNet/config_mobilenet_technical.json \ ---samples-file $(pwd)/data/TID2013/tid_labels_train.json \ ---image-dir /path/to/image/dir/local -``` + ```bash + ./train-local \ + --config-file $(pwd)/models/MobileNet/config_technical_cpu.json \ + --samples-file $(pwd)/data/TID2013/tid_labels_train.json \ + --image-dir /path/to/image/dir/local + ``` This will start a training container from the Docker image `nima-cpu` and create a timestamp train job folder under `train_jobs`, where the trained model weights and logs will be stored. The `--image-dir` argument requires the path of the image directory on your local machine. - In order to stop the last launched container run -``` -CONTAINER_ID=$(docker ps -l -q) -docker container stop $CONTAINER_ID -``` +In order to stop the last launched container run + ```bash + CONTAINER_ID=$(docker ps -l -q) + docker container stop $CONTAINER_ID + ``` - In order to stream logs from last launched container run -``` -CONTAINER_ID=$(docker ps -l -q) -docker logs $CONTAINER_ID --follow -``` +In order to stream logs from last launched container run + ```bash + CONTAINER_ID=$(docker ps -l -q) + docker logs $CONTAINER_ID --follow + ``` ## Train remotely on AWS EC2 1. Configure your AWS CLI. Ensure that your account has limits for GPU instances and read/write access to the S3 bucket specified in config file [[link](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html)] -``` -aws configure -``` + ```bash + aws configure + ``` 2. Launch EC2 instance with Docker Machine. Choose an Ubuntu AMI based on your region (https://cloud-images.ubuntu.com/locator/ec2/). For example, to launch a `p2.xlarge` EC2 instance named `ec2-p2` run (NB: change region, VPC ID and AMI ID as per your setup) -``` -docker-machine create --driver amazonec2 \ - --amazonec2-region eu-west-1 \ - --amazonec2-ami ami-58d7e821 \ - --amazonec2-instance-type p2.xlarge \ - --amazonec2-vpc-id vpc-abc \ - ec2-p2 -``` + ```bash + docker-machine create --driver amazonec2 \ + --amazonec2-region eu-west-1 \ + --amazonec2-ami ami-58d7e821 \ + --amazonec2-instance-type p2.xlarge \ + --amazonec2-vpc-id vpc-abc \ + ec2-p2 + ``` 3. ssh into EC2 instance - -``` -docker-machine ssh ec2-p2 -``` + ```bash + docker-machine ssh ec2-p2 + ``` 4. 
Update NVIDIA drivers and install **nvidia-docker** (see this [blog post](https://towardsdatascience.com/using-docker-to-set-up-a-deep-learning-environment-on-aws-6af37a78c551) for more details) + ```bash + # update NVIDIA drivers + sudo add-apt-repository ppa:graphics-drivers/ppa -y + sudo apt-get update + sudo apt-get install -y nvidia-375 nvidia-settings nvidia-modprobe -``` -# update NVIDIA drivers -sudo add-apt-repository ppa:graphics-drivers/ppa -y -sudo apt-get update -sudo apt-get install -y nvidia-375 nvidia-settings nvidia-modprobe - -# install nvidia-docker -wget -P /tmp https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker_1.0.1-1_amd64.deb -sudo dpkg -i /tmp/nvidia-docker_1.0.1-1_amd64.deb && rm /tmp/nvidia-docker_1.0.1-1_amd64.deb -``` + # install nvidia-docker + wget -P /tmp https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker_1.0.1-1_amd64.deb + sudo dpkg -i /tmp/nvidia-docker_1.0.1-1_amd64.deb && rm /tmp/nvidia-docker_1.0.1-1_amd64.deb + ``` 5. Download dataset to EC2 instance (see instructions under [Datasets](#datasets)). We recommend to save the AMI with the downloaded data for future use. - 6. Run the remote EC2 training script (e.g. for AVA dataset) -``` -./train-ec2 \ ---docker-machine ec2-p2 \ ---config-file $(pwd)/models/MobileNet/config_mobilenet_aesthetic.json \ ---samples-file $(pwd)/data/AVA/ava_labels_train.json \ ---image-dir /path/to/image/dir/remote -``` + ```bash + ./train-ec2 \ + --docker-machine ec2-p2 \ + --config-file $(pwd)/models/MobileNet/config_aesthetic_gpu.json \ + --samples-file $(pwd)/data/AVA/ava_labels_train.json \ + --image-dir /path/to/image/dir/remote + ``` The training progress will be streamed to your terminal. After the training has finished, the train outputs (logs and best model weights) will be stored on S3 in a timestamped folder. The S3 output bucket can be specified in the **config file**. The `--image-dir` argument requires the path of the image directory on your remote instance. ## Contribute We welcome all kinds of contributions and will publish the performances from new models in the performance table under [Trained models](#trained-models). -For example, to train a new aesthetic NIMA model based on InceptionV3 ImageNet weights, you just have to change the `base_model_name` parameter in the config file `models/MobileNet/config_mobilenet_aesthetic.json` to "InceptionV3". You can also control all major hyperparameters in the config file, like learning rate, batch size, or dropout rate. +For example, to train a new aesthetic NIMA model based on InceptionV3 ImageNet weights, you just have to change the `base_model_name` parameter in the config file `models/MobileNet/config_aesthetic_gpu.json` to "InceptionV3". You can also control all major hyperparameters in the config file, like learning rate, batch size, or dropout rate. See the [Contribution](CONTRIBUTING.md) guide for more details. ## Datasets This project uses two datasets to train the NIMA model: + 1. [**AVA**](https://github.com/ylogx/aesthetics/tree/master/data/ava) used for aesthetic ratings ([data](http://academictorrents.com/details/71631f83b11d3d79d8f84efe0a7e12f0ac001460)) 2. [**TID2013**](http://www.ponomarenko.info/tid2013.htm) used for technical ratings @@ -211,20 +212,20 @@ To get predictions from the aesthetic or technical model: 1. Build the NIMA TFS Docker image `docker build -t tfs_nima contrib/tf_serving` 2. Run a NIMA TFS container with `docker run -d --name tfs_nima -p 8500:8500 tfs_nima` 3. 
Install python dependencies to run TF serving sample client -``` -virtualenv -p python3 contrib/tf_serving/venv_tfs_nima -source contrib/tf_serving/venv_tfs_nima/bin/activate -pip install -r contrib/tf_serving/requirements.txt -``` + ``` + virtualenv -p python3 contrib/tf_serving/venv_tfs_nima + source contrib/tf_serving/venv_tfs_nima/bin/activate + pip install -r contrib/tf_serving/requirements.txt + ``` 4. Get predictions from aesthetic or technical model by running the sample client -``` -python -m contrib.tf_serving.tfs_sample_client --image-path src/tests/test_images/42039.jpg --model-name mobilenet_aesthetic -python -m contrib.tf_serving.tfs_sample_client --image-path src/tests/test_images/42039.jpg --model-name mobilenet_technical -``` + ``` + python -m contrib.tf_serving.tfs_sample_client --image-path src/tests/test_images/42039.jpg --model-name mobilenet_aesthetic + python -m contrib.tf_serving.tfs_sample_client --image-path src/tests/test_images/42039.jpg --model-name mobilenet_technical + ``` ## Cite this work Please cite Image Quality Assessment in your publications if this is useful for your research. Here is an example BibTeX entry: -``` +```BibTeX @misc{idealods2018imagequalityassessment, title={Image Quality Assessment}, author={Christopher Lennan and Hao Nguyen and Dat Tran}, diff --git a/contrib/tf_serving/save_tfs_model.py b/contrib/tf_serving/save_tfs_model.py index d177b5cc..82042bc2 100644 --- a/contrib/tf_serving/save_tfs_model.py +++ b/contrib/tf_serving/save_tfs_model.py @@ -1,7 +1,7 @@ -import keras.backend as K +import tensorflow.keras.backend as K import argparse -from keras.applications.mobilenet import DepthwiseConv2D, relu6 -from keras.utils.generic_utils import CustomObjectScope +from tensorflow.keras.applications.mobilenet import DepthwiseConv2D, relu6 +from tensorflow.keras.utils.generic_utils import CustomObjectScope from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model.signature_def_utils_impl import \ diff --git a/contrib/tf_serving/tfs_sample_client.py b/contrib/tf_serving/tfs_sample_client.py index 3055bf78..5208db9d 100644 --- a/contrib/tf_serving/tfs_sample_client.py +++ b/contrib/tf_serving/tfs_sample_client.py @@ -1,6 +1,6 @@ import json import argparse -import keras +import tensorflow.keras as keras import numpy as np import tensorflow as tf from src.utils import utils diff --git a/mkdocs/mkdocs.yml b/mkdocs/mkdocs.yml index e0231373..b4fc973a 100644 --- a/mkdocs/mkdocs.yml +++ b/mkdocs/mkdocs.yml @@ -14,7 +14,6 @@ nav: - Trainer: - Train: trainer/train.md - Utils: - - Keras Utils: utils/keras_utils.md - Losses: utils/losses.md - Utils: utils/utils.md - Contribution: CONTRIBUTING.md @@ -34,4 +33,4 @@ google_analytics: - 'auto' markdown_extensions: - - codehilite \ No newline at end of file + - codehilite diff --git a/models/MobileNet/config_aesthetic_cpu.json b/models/MobileNet/config_aesthetic_cpu.json new file mode 100644 index 00000000..6b971dd3 --- /dev/null +++ b/models/MobileNet/config_aesthetic_cpu.json @@ -0,0 +1,18 @@ +{ + "train_env": "remote", + "docker_image": "nima-cpu", + "base_model_name": "MobileNet", + "existing_weights": null, + "n_classes": 10, + "batch_size": 96, + "epochs_train_dense": 5, + "learning_rate_dense": 0.001, + "decay_dense": 0, + "epochs_train_all": 9, + "learning_rate_all": 0.00003, + "decay_all": 0.000023, + "l2_reg": null, + "dropout_rate": 0.75, + "multiprocessing_data_load": false, + 
"num_workers_data_load": 1 +} diff --git a/models/MobileNet/config_mobilenet_aesthetic.json b/models/MobileNet/config_aesthetic_gpu.json similarity index 100% rename from models/MobileNet/config_mobilenet_aesthetic.json rename to models/MobileNet/config_aesthetic_gpu.json diff --git a/models/MobileNet/config_technical_cpu.json b/models/MobileNet/config_technical_cpu.json new file mode 100644 index 00000000..8ef4fec1 --- /dev/null +++ b/models/MobileNet/config_technical_cpu.json @@ -0,0 +1,18 @@ +{ + "train_env": "TID2013", + "docker_image": "nima-cpu", + "base_model_name": "MobileNet", + "existing_weights": null, + "n_classes": 10, + "batch_size": 8, + "epochs_train_dense": 1, + "learning_rate_dense": 0.001, + "decay_dense": 0, + "epochs_train_all": 5, + "learning_rate_all": 0.0000003, + "decay_all": 0, + "dropout_rate": 0.75, + "multiprocessing_data_load": false, + "num_workers_data_load": 10, + "img_format": "bmp" +} diff --git a/models/MobileNet/config_mobilenet_technical.json b/models/MobileNet/config_technical_gpu.json similarity index 100% rename from models/MobileNet/config_mobilenet_technical.json rename to models/MobileNet/config_technical_gpu.json diff --git a/src/handlers/data_generator.py b/src/handlers/data_generator.py index 65e772fe..08af73ce 100644 --- a/src/handlers/data_generator.py +++ b/src/handlers/data_generator.py @@ -1,11 +1,11 @@ import os import numpy as np -import keras +import tensorflow as tf from utils import utils -class TrainDataGenerator(keras.utils.Sequence): +class TrainDataGenerator(tf.keras.utils.Sequence): '''inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator''' def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess, img_format, img_load_dims=(256, 256), img_crop_dims=(224, 224), shuffle=True): @@ -58,7 +58,7 @@ def __data_generator(self, batch_samples): return X, y -class TestDataGenerator(keras.utils.Sequence): +class TestDataGenerator(tf.keras.utils.Sequence): '''inherits from Keras Sequence base object, allows to use multiprocessing in .fit_generator''' def __init__(self, samples, img_dir, batch_size, n_classes, basenet_preprocess, img_format, img_load_dims=(224, 224)): diff --git a/src/handlers/model_builder.py b/src/handlers/model_builder.py index ccbcef78..ee15276e 100644 --- a/src/handlers/model_builder.py +++ b/src/handlers/model_builder.py @@ -1,8 +1,8 @@ import importlib -from keras.models import Model -from keras.layers import Dropout, Dense -from keras.optimizers import Adam +from tensorflow.keras.models import Model +from tensorflow.keras.layers import Dropout, Dense +from tensorflow.keras.optimizers import Adam from utils.losses import earth_movers_distance @@ -21,11 +21,11 @@ def __init__(self, base_model_name, n_classes=10, learning_rate=0.001, dropout_r def _get_base_module(self): # import Keras base model module if self.base_model_name == 'InceptionV3': - self.base_module = importlib.import_module('keras.applications.inception_v3') + self.base_module = importlib.import_module('tensorflow.keras.applications.inception_v3') elif self.base_model_name == 'InceptionResNetV2': - self.base_module = importlib.import_module('keras.applications.inception_resnet_v2') + self.base_module = importlib.import_module('tensorflow.keras.applications.inception_resnet_v2') else: - self.base_module = importlib.import_module('keras.applications.'+self.base_model_name.lower()) + self.base_module = importlib.import_module('tensorflow.keras.applications.'+self.base_model_name.lower()) def 
build(self): # get base model class diff --git a/src/requirements.txt b/src/requirements.txt index 08d1bbeb..2fe4645d 100644 --- a/src/requirements.txt +++ b/src/requirements.txt @@ -1,3 +1,3 @@ -keras==2.1.* -nose==1.3.* +nose +sklearn pillow==5.0.* diff --git a/src/trainer/train.py b/src/trainer/train.py index 0ba8b7ba..6ccaf647 100644 --- a/src/trainer/train.py +++ b/src/trainer/train.py @@ -1,70 +1,84 @@ - import os import argparse -from keras import backend as K -from keras.callbacks import ModelCheckpoint +from tensorflow.keras import backend as K +from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard from sklearn.model_selection import train_test_split from handlers.data_generator import TrainDataGenerator, TestDataGenerator from handlers.model_builder import Nima from handlers.samples_loader import load_samples from handlers.config_loader import load_config from utils.utils import ensure_dir_exists -from utils.keras_utils import TensorBoardBatch - - -def train(base_model_name, - n_classes, - samples, - image_dir, - batch_size, - epochs_train_dense, - epochs_train_all, - learning_rate_dense, - learning_rate_all, - dropout_rate, - job_dir, - img_format='jpg', - existing_weights=None, - multiprocessing_data_load=False, - num_workers_data_load=2, - decay_dense=0, - decay_all=0, - **kwargs): + + +def train( + base_model_name, + n_classes, + samples, + image_dir, + batch_size, + epochs_train_dense, + epochs_train_all, + learning_rate_dense, + learning_rate_all, + dropout_rate, + job_dir, + img_format='jpg', + existing_weights=None, + multiprocessing_data_load=False, + num_workers_data_load=2, + decay_dense=0, + decay_all=0, + **kwargs +): # build NIMA model and load existing weights if they were provided in config - nima = Nima(base_model_name, n_classes, learning_rate_dense, dropout_rate, decay=decay_dense) + nima = Nima( + base_model_name, n_classes, learning_rate_dense, dropout_rate, decay=decay_dense + ) nima.build() if existing_weights is not None: nima.nima_model.load_weights(existing_weights) # split samples in train and validation set, and initialize data generators - samples_train, samples_test = train_test_split(samples, test_size=0.05, shuffle=True, random_state=10207) - - training_generator = TrainDataGenerator(samples_train, - image_dir, - batch_size, - n_classes, - nima.preprocessing_function(), - img_format=img_format) - - validation_generator = TestDataGenerator(samples_test, - image_dir, - batch_size, - n_classes, - nima.preprocessing_function(), - img_format=img_format) - - # initialize callbacks TensorBoardBatch and ModelCheckpoint - tensorboard = TensorBoardBatch(log_dir=os.path.join(job_dir, 'logs')) - - model_save_name = 'weights_'+base_model_name.lower()+'_{epoch:02d}_{val_loss:.3f}.hdf5' + samples_train, samples_test = train_test_split( + samples, test_size=0.05, shuffle=True, random_state=10207 + ) + + training_generator = TrainDataGenerator( + samples_train, + image_dir, + batch_size, + n_classes, + nima.preprocessing_function(), + img_format=img_format, + ) + + validation_generator = TestDataGenerator( + samples_test, + image_dir, + batch_size, + n_classes, + nima.preprocessing_function(), + img_format=img_format, + ) + + # initialize callbacks TensorBoard and ModelCheckpoint + tensorboard = TensorBoard( + log_dir=os.path.join(job_dir, 'logs'), update_freq='batch' + ) + + model_save_name = ( + 'weights_' + base_model_name.lower() + '_{epoch:02d}_{val_loss:.3f}.hdf5' + ) model_file_path = os.path.join(job_dir, 'weights', model_save_name) - 
model_checkpointer = ModelCheckpoint(filepath=model_file_path, - monitor='val_loss', - verbose=1, - save_best_only=True, - save_weights_only=True) + model_checkpointer = ModelCheckpoint( + filepath=model_file_path, + monitor='val_loss', + verbose=1, + save_best_only=True, + save_weights_only=True, + ) # start training only dense layers for layer in nima.base_model.layers: @@ -73,14 +87,16 @@ def train(base_model_name, nima.compile() nima.nima_model.summary() - nima.nima_model.fit_generator(generator=training_generator, - validation_data=validation_generator, - epochs=epochs_train_dense, - verbose=1, - use_multiprocessing=multiprocessing_data_load, - workers=num_workers_data_load, - max_q_size=30, - callbacks=[tensorboard, model_checkpointer]) + nima.nima_model.fit_generator( + generator=training_generator, + validation_data=validation_generator, + epochs=epochs_train_dense, + verbose=1, + use_multiprocessing=multiprocessing_data_load, + workers=num_workers_data_load, + max_queue_size=30, + callbacks=[tensorboard, model_checkpointer], + ) # start training all layers for layer in nima.base_model.layers: @@ -91,15 +107,17 @@ def train(base_model_name, nima.compile() nima.nima_model.summary() - nima.nima_model.fit_generator(generator=training_generator, - validation_data=validation_generator, - epochs=epochs_train_dense+epochs_train_all, - initial_epoch=epochs_train_dense, - verbose=1, - use_multiprocessing=multiprocessing_data_load, - workers=num_workers_data_load, - max_q_size=30, - callbacks=[tensorboard, model_checkpointer]) + nima.nima_model.fit_generator( + generator=training_generator, + validation_data=validation_generator, + epochs=epochs_train_dense + epochs_train_all, + initial_epoch=epochs_train_dense, + verbose=1, + use_multiprocessing=multiprocessing_data_load, + workers=num_workers_data_load, + max_queue_size=30, + callbacks=[tensorboard, model_checkpointer], + ) K.clear_session() @@ -107,8 +125,15 @@ def train(base_model_name, if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument('-j', '--job-dir', help='train job directory with samples and config file', required=True) - parser.add_argument('-i', '--image-dir', help='directory with image files', required=True) + parser.add_argument( + '-j', + '--job-dir', + help='train job directory with samples and config file', + required=True, + ) + parser.add_argument( + '-i', '--image-dir', help='directory with image files', required=True + ) args = parser.parse_args() diff --git a/src/utils/keras_utils.py b/src/utils/keras_utils.py deleted file mode 100644 index 6b986a34..00000000 --- a/src/utils/keras_utils.py +++ /dev/null @@ -1,39 +0,0 @@ - -import keras.backend as K -from keras.callbacks import TensorBoard - - -class TensorBoardBatch(TensorBoard): - def __init__(self, *args, **kwargs): - super(TensorBoardBatch, self).__init__(*args, **kwargs) - self.tf = __import__('tensorflow') - self.batch_counter = 0 - - def on_batch_end(self, batch, logs=None): - self.batch_counter += 1 - logs = logs or {} - logs['lr'] = K.get_value(self.model.optimizer.lr) - for name, value in logs.items(): - if name in ['batch', 'size']: - continue - summary = self.tf.Summary() - summary_value = summary.value.add() - summary_value.simple_value = value.item() - summary_value.tag = name - self.writer.add_summary(summary, self.batch_counter) - - self.writer.flush() - - def on_epoch_end(self, epoch, logs=None): - logs = logs or {} - logs['lr'] = K.get_value(self.model.optimizer.lr) - for name, value in logs.items(): - if name in 
['batch', 'size']: - continue - summary = self.tf.Summary() - summary_value = summary.value.add() - summary_value.simple_value = value.item() - summary_value.tag = name - self.writer.add_summary(summary, epoch) - - self.writer.flush() diff --git a/src/utils/losses.py b/src/utils/losses.py index a9454f88..cf78ab26 100644 --- a/src/utils/losses.py +++ b/src/utils/losses.py @@ -1,5 +1,5 @@ -from keras import backend as K +from tensorflow.keras import backend as K def earth_movers_distance(y_true, y_pred): diff --git a/src/utils/utils.py b/src/utils/utils.py index 789dc8cb..fdbbcc74 100644 --- a/src/utils/utils.py +++ b/src/utils/utils.py @@ -1,7 +1,7 @@ import os import json -import keras +import tensorflow as tf import numpy as np @@ -36,7 +36,7 @@ def random_horizontal_flip(img): def load_image(img_file, target_size): - return np.asarray(keras.preprocessing.image.load_img(img_file, target_size=target_size)) + return np.asarray(tf.keras.preprocessing.image.load_img(img_file, target_size=target_size)) def normalize_labels(labels):
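
The deleted `src/utils/keras_utils.py` callback (`TensorBoardBatch`) is replaced in `src/trainer/train.py` by the built-in `tf.keras.callbacks.TensorBoard` with `update_freq='batch'`. Below is a minimal, self-contained sketch of that substitution under TensorFlow 2.0; the toy model, random data, and the `job_dir` value are placeholders for illustration only and are not part of this repository's trainer.

```python
# Minimal sketch: per-batch TensorBoard logging with the built-in callback that
# replaces the removed TensorBoardBatch. Model, data and job_dir are illustrative.
import os
import numpy as np
import tensorflow as tf

job_dir = '/tmp/train_job'  # placeholder; the real trainer derives this from --job-dir

# Toy stand-in for the NIMA model (10-class softmax head).
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='softmax', input_shape=(8,))
])
model.compile(optimizer='adam', loss='categorical_crossentropy')

# Built-in callback, configured as in the updated train.py.
tensorboard = tf.keras.callbacks.TensorBoard(
    log_dir=os.path.join(job_dir, 'logs'),
    update_freq='batch',  # write summaries after every batch, as the custom callback did
)

# Dummy training data to exercise the callback.
x = np.random.rand(64, 8).astype('float32')
y = tf.keras.utils.to_categorical(np.random.randint(0, 10, 64), num_classes=10)
model.fit(x, y, batch_size=8, epochs=1, callbacks=[tensorboard])
```

One behavioral difference worth noting: the removed custom callback also wrote the optimizer's learning rate to TensorBoard on every batch, whereas the built-in callback with `update_freq='batch'` only logs the standard metrics; per-batch learning-rate logging would need a separate callback if it is still wanted.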