Add benchmarks (albumentations-team#16)
creafz authored Jul 1, 2018
1 parent 81b58c5 commit ed1cf17
Showing 4 changed files with 338 additions and 0 deletions.
20 changes: 20 additions & 0 deletions README.md
@@ -69,6 +69,26 @@ pip install -U git+https://github.com/albu/albumentations
The full documentation is available at [albumentations.readthedocs.io](https://albumentations.readthedocs.io/en/latest/).


## Benchmarking results
To run the benchmark yourself, follow the instructions in [benchmark/README.md](benchmark/README.md).

Results for running the benchmark on the first 2000 images from the ImageNet validation set using an Intel Core i7-7800X CPU. All times are in seconds; lower is better.

| | albumentations | imgaug | torchvision<br> (Pillow backend)| torchvision<br> (Pillow-SIMD backend) | Keras |
|-------------------|:---------------:|:--------:|:-------------------------------:|:-------------------------------------:|:--------:|
| RandomCrop64 | **0.0017** | - | 0.0182 | 0.0182 | - |
| PadToSize512 | **0.2413** | - | 2.493 | 2.3682 | - |
| HorizontalFlip | 0.7765 | 2.2299 | **0.3031** | 0.3054 | 2.0508 |
| VerticalFlip | **0.178** | 0.3899 | 0.2326 | 0.2308 | 0.1799 |
| Rotate | **3.8538** | 4.0581 | 16.16 | 9.5011 | 50.8632 |
| ShiftScaleRotate | **2.0605** | 2.4478 | 18.5401 | 10.6062 | 47.0568 |
| Brightness | 2.5301 |**2.3607**| 4.6854 | 3.4814 | 9.9237 |
| ShiftHSV | **10.3925** | 14.2255 | 34.7778 | 27.0215 | - |
| ShiftRGB | 4.3094 |**2.1989**| - | - | 3.0598 |
| Gamma | 1.4832 | - | **1.1397** | 1.1447 | - |
| Grayscale | **1.2048** | 5.3895 | 1.6826 | 1.2721 | - |
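
Each cell is the mean wall-clock time a library needs to apply the transform once to every image. Roughly, the measurement loop in `benchmark/benchmark.py` boils down to the following sketch (simplified; it assumes the images are already loaded into a list of NumPy arrays named `imgs`):

```python
from timeit import Timer

import numpy as np
import albumentations.augmentations.functional as F


def benchmark_hflip(imgs, runs=5):
    # Time one full pass over all images, repeated `runs` times.
    timer = Timer(lambda: [F.hflip(img) for img in imgs])
    run_times = timer.repeat(number=1, repeat=runs)
    # The table reports the mean of the per-run times, in seconds.
    return np.mean(run_times)
```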


## Contributing
1. Clone the repository:
```
26 changes: 26 additions & 0 deletions benchmark/README.md
@@ -0,0 +1,26 @@
## Running the benchmark

1. Install the requirements:
```
pip install -r requirements.txt
```
2. Prepare a directory with images.
3. Run the benchmark (an optional `--show-std` flag is described after the example):
```
python benchmark.py --data-dir <path to directory with images> --images <number of images> --runs <number of runs>
```
For example:
```
python benchmark.py --data-dir '/hdd/ILSVRC2012_img_val' --images 2000 --runs 5
```
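
The script also accepts a `--show-std` flag, which adds the standard deviation across runs to each reported mean:
```
python benchmark.py --data-dir '/hdd/ILSVRC2012_img_val' --images 2000 --runs 5 --show-std
```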

To use Pillow-SIMD instead of Pillow as the torchvision backend:

1. Uninstall Pillow:
```
pip uninstall -y pillow
```
2. Install Pillow-SIMD:
```
pip install pillow-simd
```
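
Pillow-SIMD installs under the same `PIL` namespace, so torchvision picks it up automatically without any code changes. To double-check which build is active, one option (this assumes Pillow-SIMD's usual `.postN` version suffix, which the benchmark itself does not verify) is:
```
python -c "import PIL; print(PIL.PILLOW_VERSION)"
```
Pillow-SIMD builds typically report a version such as `5.1.1.post0`.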
282 changes: 282 additions & 0 deletions benchmark/benchmark.py
@@ -0,0 +1,282 @@
import argparse
import os
from timeit import Timer
from collections import defaultdict

from PIL import Image
import cv2
from tqdm import tqdm
import numpy as np
import pandas as pd
import torchvision.transforms.functional as torchvision
import keras as _
import keras_preprocessing.image as keras
from imgaug import augmenters as iaa

import albumentations.augmentations.functional as albumentations
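# Each library's functional API is aliased to the bare library name (for example,
# torchvision.transforms.functional is imported as `torchvision`), so that
# BenchmarkTest.run() can pick the matching implementation via getattr(self, library).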


def parse_args():
    parser = argparse.ArgumentParser(description='Augmentation libraries performance benchmark')
    parser.add_argument('-d', '--data-dir', required=True, metavar='DIR', help='path to a directory with images')
    parser.add_argument('-i', '--images', default=2000, type=int, metavar='N',
                        help='number of images for benchmarking (default: 2000)')
    parser.add_argument('-r', '--runs', default=5, type=int, metavar='N',
                        help='number of runs for each benchmark (default: 5)')
    parser.add_argument('--show-std', dest='show_std', action='store_true',
                        help='show standard deviation for benchmark runs')
    return parser.parse_args()


def read_img_pillow(path):
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')


def read_img_cv2(filepath):
    img = cv2.imread(filepath)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img


def format_results(run_times_for_aug, show_std=False):
    if run_times_for_aug is None:
        return '-'
    result = '{:.4f}'.format(np.mean(run_times_for_aug))
    if show_std:
        result += ' ± {:.4f}'.format(np.std(run_times_for_aug))
    return result


class BenchmarkTest:

    def __str__(self):
        return self.__class__.__name__

    def run(self, library, imgs):
        transform = getattr(self, library)
        for img in imgs:
            transform(img)


class HorizontalFlip(BenchmarkTest):

    def __init__(self):
        self.imgaug_transform = iaa.Fliplr(p=1)

    def albumentations(self, img):
        return albumentations.hflip(img)

    def torchvision(self, img):
        return torchvision.hflip(img)

    def keras(self, img):
        return np.ascontiguousarray(keras.flip_axis(img, axis=1))

    def imgaug(self, img):
        return np.ascontiguousarray(self.imgaug_transform.augment_image(img))


class VerticalFlip(BenchmarkTest):

    def __init__(self):
        self.imgaug_transform = iaa.Flipud(p=1)

    def albumentations(self, img):
        return albumentations.vflip(img)

    def torchvision(self, img):
        return torchvision.vflip(img)

    def keras(self, img):
        return np.ascontiguousarray(keras.flip_axis(img, axis=0))

    def imgaug(self, img):
        return np.ascontiguousarray(self.imgaug_transform.augment_image(img))


class Rotate(BenchmarkTest):

    def __init__(self):
        self.imgaug_transform = iaa.Affine(rotate=(45, 45), order=1, mode='reflect')

    def albumentations(self, img):
        return albumentations.rotate(img, angle=-45)

    def torchvision(self, img):
        return torchvision.rotate(img, angle=-45, resample=Image.BILINEAR)

    def keras(self, img):
        return keras.apply_affine_transform(img, theta=45, channel_axis=2, fill_mode='reflect')

    def imgaug(self, img):
        return self.imgaug_transform.augment_image(img)


class Brightness(BenchmarkTest):

    def __init__(self):
        self.imgaug_transform = iaa.Multiply((1.5, 1.5), per_channel=False)

    def albumentations(self, img):
        return albumentations.random_brightness(img, alpha=1.5)

    def torchvision(self, img):
        return torchvision.adjust_brightness(img, brightness_factor=1.5)

    def keras(self, img):
        return keras.apply_brightness_shift(img, brightness=1.5).astype(np.uint8)

    def imgaug(self, img):
        return self.imgaug_transform.augment_image(img)


class ShiftScaleRotate(BenchmarkTest):

    def __init__(self):
        self.imgaug_transform = iaa.Affine(
            scale=(2, 2),
            rotate=(45, 45),
            translate_px=(50, 50),
            order=1,
            mode='reflect',
        )

    def albumentations(self, img):
        return albumentations.shift_scale_rotate(img, angle=-45, scale=2, dx=0.2, dy=0.2)

    def torchvision(self, img):
        return torchvision.affine(img, angle=45, translate=(50, 50), scale=2, shear=0, resample=Image.BILINEAR)

    def keras(self, img):
        return keras.apply_affine_transform(img, theta=45, tx=50, ty=50, zx=0.5, zy=0.5, fill_mode='reflect')

    def imgaug(self, img):
        return self.imgaug_transform.augment_image(img)


class ShiftHSV(BenchmarkTest):

    def __init__(self):
        self.imgaug_transform = iaa.AddToHueAndSaturation((20, 20), per_channel=False)

    def albumentations(self, img):
        return albumentations.shift_hsv(img, hue_shift=20, sat_shift=20, val_shift=20)

    def torchvision(self, img):
        img = torchvision.adjust_hue(img, hue_factor=0.1)
        img = torchvision.adjust_saturation(img, saturation_factor=1.2)
        img = torchvision.adjust_brightness(img, brightness_factor=1.2)
        return img

    def imgaug(self, img):
        return self.imgaug_transform.augment_image(img)


class RandomCrop64(BenchmarkTest):

    def albumentations(self, img):
        return albumentations.random_crop(img, crop_height=64, crop_width=64, h_start=0, w_start=0)

    def torchvision(self, img):
        return torchvision.crop(img, i=0, j=0, h=64, w=64)


class ShiftRGB(BenchmarkTest):

    def __init__(self):
        self.imgaug_transform = iaa.Add((100, 100), per_channel=False)

    def albumentations(self, img):
        return albumentations.shift_rgb(img, r_shift=100, g_shift=100, b_shift=100)

    def keras(self, img):
        return keras.apply_channel_shift(img, intensity=100, channel_axis=2)

    def imgaug(self, img):
        return self.imgaug_transform.augment_image(img)


class PadToSize512(BenchmarkTest):

    def albumentations(self, img):
        return albumentations.pad(img, min_height=512, min_width=512)

    def torchvision(self, img):
        if img.size[0] < 512:
            img = torchvision.pad(img, (int((1 + 512 - img.size[0]) / 2), 0), padding_mode='reflect')
        if img.size[1] < 512:
            img = torchvision.pad(img, (0, int((1 + 512 - img.size[1]) / 2)), padding_mode='reflect')
        return img


class Gamma(BenchmarkTest):

    def albumentations(self, img):
        return albumentations.gamma_transform(img, gamma=0.5)

    def torchvision(self, img):
        return torchvision.adjust_gamma(img, gamma=0.5)


class Grayscale(BenchmarkTest):

    def __init__(self):
        self.imgaug_transform = iaa.Grayscale(alpha=1.0)

    def albumentations(self, img):
        return albumentations.to_gray(img)

    def torchvision(self, img):
        return torchvision.to_grayscale(img, num_output_channels=3)

    def imgaug(self, img):
        return self.imgaug_transform.augment_image(img)


def main():
    args = parse_args()
    run_times = defaultdict(dict)
    libraries = ['albumentations', 'imgaug', 'torchvision', 'keras']
    data_dir = args.data_dir
    paths = list(sorted(os.listdir(data_dir)))
    paths = paths[:args.images]
    imgs_cv2 = [read_img_cv2(os.path.join(data_dir, path)) for path in paths]
    imgs_pillow = [read_img_pillow(os.path.join(data_dir, path)) for path in paths]
    for library in libraries:
        imgs = imgs_pillow if library == 'torchvision' else imgs_cv2
        benchmarks = [
            HorizontalFlip(),
            VerticalFlip(),
            Rotate(),
            ShiftScaleRotate(),
            Brightness(),
            ShiftRGB(),
            ShiftHSV(),
            Gamma(),
            Grayscale(),
            RandomCrop64(),
            PadToSize512(),
        ]
        pbar = tqdm(total=len(benchmarks))
        for benchmark in benchmarks:
            pbar.set_description('Current benchmark: {} | {}'.format(library, benchmark))
            run_time = None
            if hasattr(benchmark, library):
                timer = Timer(lambda: benchmark.run(library, imgs))
                run_time = timer.repeat(number=1, repeat=args.runs)
            run_times[library][str(benchmark)] = run_time
            pbar.update(1)
        pbar.close()
    pd.set_option('display.width', 1000)
    df = pd.DataFrame.from_dict(run_times)
    df = df.applymap(lambda r: format_results(r, args.show_std))
    df = df[libraries]
    augmentations = ['RandomCrop64', 'PadToSize512', 'HorizontalFlip', 'VerticalFlip', 'Rotate', 'ShiftScaleRotate',
                     'Brightness', 'ShiftHSV', 'ShiftRGB', 'Gamma', 'Grayscale']
    df = df.reindex(augmentations)
    print(df.head(len(augmentations)))


if __name__ == '__main__':
    main()
10 changes: 10 additions & 0 deletions benchmark/requirements.txt
@@ -0,0 +1,10 @@
opencv-python
pillow
numpy
pandas
tqdm
tensorflow # required for keras
keras
imgaug
albumentations
torchvision
