Commit d243d99

clean for release

ssundaram21 committed Jul 30, 2024
1 parent 38b8f37
Showing 10 changed files with 29 additions and 203 deletions.
19 changes: 0 additions & 19 deletions configs/eval.yaml

This file was deleted.

8 changes: 0 additions & 8 deletions configs/eval_baseline.yaml

This file was deleted.

6 changes: 0 additions & 6 deletions configs/eval_checkpoint.yaml

This file was deleted.

19 changes: 0 additions & 19 deletions configs/eval_dino.yaml

This file was deleted.

22 changes: 10 additions & 12 deletions configs/eval_ensemble.yaml
@@ -1,19 +1,17 @@
 tag: "open_clip"
 
-eval_checkpoint: "/vision-nfs/isola/projects/shobhita/code/dreamsim/dreamsim_steph/new_checkpoints/lora_single_open_clip_vitb32_embedding_lora_lr_0.0003_batchsize_32_wd_0.0_hiddensize_1_margin_0.05_lorar_16_loraalpha_32.0_loradropout_0.2/lightning_logs/version_0/checkpoints/open_clip_vitb32_lora/"
-eval_checkpoint_cfg: "/vision-nfs/isola/projects/shobhita/code/dreamsim/dreamsim_steph/new_checkpoints/lora_single_open_clip_vitb32_embedding_lora_lr_0.0003_batchsize_32_wd_0.0_hiddensize_1_margin_0.05_lorar_16_loraalpha_32.0_loradropout_0.2/lightning_logs/version_0/config.yaml"
-load_dir: "/vision-nfs/isola/projects/shobhita/code/dreamsim/models"
+eval_checkpoint: "/path-to-ckpt/lightning_logs/version_0/checkpoints/epoch-to-eval/"
+eval_checkpoint_cfg: "/path-to-ckpt/lightning_logs/version_0/config.yaml"
+load_dir: "./models"
 
-baseline_model: "open_clip_vitb32"
-baseline_feat_type: "embedding"
-baseline_stride: "32"
+baseline_model: "dino_vitb16,clip_vitb16,open_clip_vitb16"
+baseline_feat_type: "cls,embedding,embedding"
+baseline_stride: "16,16,16"
 
-nights_root: "/vision-nfs/isola/projects/shobhita/data/nights"
-bapps_root: "/vision-nfs/isola/projects/shobhita/data/2afc/val"
-things_root: "/vision-nfs/isola/projects/shobhita/data/things/things_src_images"
-things_file: "/vision-nfs/isola/projects/shobhita/data/things/things_valset.txt"
-df2_root: "/data/vision/phillipi/perception/data/df2_org3/"
-df2_gt: "/data/vision/phillipi/perception/code/repalignment/configs/df2_gt.json"
+nights_root: "./data/nights"
+bapps_root: "./data/2afc/val"
+things_root: "./data/things/things_src_images"
+things_file: "./data/things/things_valset.txt"
 
 batch_size: 256
 num_workers: 10
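The release config now points the ensemble at three comma-separated backbones. As a minimal sketch of how such fields can be consumed, assuming the eval code splits each field on commas (`parse_ensemble_config` is a hypothetical helper, not code from this commit):

```python
# Hypothetical helper: split the comma-separated ensemble fields of
# eval_ensemble.yaml into one (model, feat_type, stride) tuple per backbone.
def parse_ensemble_config(cfg: dict) -> list:
    models = cfg["baseline_model"].split(",")
    feat_types = cfg["baseline_feat_type"].split(",")
    strides = [int(s) for s in cfg["baseline_stride"].split(",")]
    assert len(models) == len(feat_types) == len(strides)
    return list(zip(models, feat_types, strides))

# Example:
# parse_ensemble_config({"baseline_model": "dino_vitb16,clip_vitb16,open_clip_vitb16",
#                        "baseline_feat_type": "cls,embedding,embedding",
#                        "baseline_stride": "16,16,16"})
# -> [('dino_vitb16', 'cls', 16), ('clip_vitb16', 'embedding', 16),
#     ('open_clip_vitb16', 'embedding', 16)]
```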
19 changes: 0 additions & 19 deletions configs/eval_open_clip.yaml

This file was deleted.

17 changes: 17 additions & 0 deletions configs/eval_single_clip.yaml
@@ -0,0 +1,17 @@
+tag: "clip"
+
+eval_checkpoint: "/path-to-ckpt/lightning_logs/version_0/checkpoints/epoch-to-eval/"
+eval_checkpoint_cfg: "/path-to-ckpt/lightning_logs/version_0/config.yaml"
+load_dir: "./models"
+
+baseline_model: "clip_vitb32"
+baseline_feat_type: "cls"
+baseline_stride: "32"
+
+nights_root: "./data/nights"
+bapps_root: "./data/2afc/val"
+things_root: "./data/things/things_src_images"
+things_file: "./data/things/things_valset.txt"
+
+batch_size: 256
+num_workers: 10
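A short usage sketch for these configs, assuming the eval entry point loads the YAML into an attribute-style namespace (the loading mechanism here is an assumption, not confirmed by this diff):

```python
# Sketch: read configs/eval_single_clip.yaml and expose keys as attributes,
# matching the args.* accesses in eval_percep.py below. Requires PyYAML.
import argparse
import yaml

with open("configs/eval_single_clip.yaml") as f:
    args = argparse.Namespace(**yaml.safe_load(f))

print(args.baseline_model, args.baseline_stride)  # clip_vitb32 32
```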
52 changes: 0 additions & 52 deletions evaluation/eval_datasets.py
@@ -69,23 +69,6 @@ def __getitem__(self, idx):
         im_ref = self.preprocess_fn(Image.open(self.ref_paths[idx]))
         return im_ref, im_left, im_right, judge
 
-class DF2Dataset(torch.utils.data.Dataset):
-    def __init__(self, root_dir, split: str, preprocess: str, load_size: int = 224,
-                 interpolation: transforms.InterpolationMode = transforms.InterpolationMode.BICUBIC):
-
-        self.preprocess_fn = get_preprocess_fn(preprocess, load_size, interpolation)
-        # self.preprocess_fn=preprocess
-        self.paths = get_paths(os.path.join(root_dir, split))
-
-    def __len__(self):
-        return len(self.paths)
-
-    def __getitem__(self, idx):
-        im_path = self.paths[idx]
-        img = Image.open(im_path)
-        img = self.preprocess_fn(img)
-        return img, im_path
-
 def pil_loader(path):
     # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
     with open(path, 'rb') as f:
@@ -97,38 +80,3 @@ def get_paths(path):
     for ext in IMAGE_EXTENSIONS:
         all_paths += glob.glob(os.path.join(path, f"**.{ext}"))
     return all_paths
-
-# class ImageDataset(torch.utils.data.Dataset):
-#     def __init__(self, root, class_to_idx, transform=None, ret_path=False):
-#         """
-#         :param root: Dataset root. Should follow the structure class1/0.jpg...n.jpg, class2/0.jpg...n.jpg
-#         :param class_to_idx: dictionary mapping the classnames to integers.
-#         :param transform:
-#         :param ret_path: boolean indicating whether to return the image path or not (useful for KNN for plotting nearest neighbors)
-#         """
-
-#         self.transform = transform
-#         self.label_to_idx = class_to_idx
-
-#         self.paths = []
-#         self.labels = []
-#         for cls in class_to_idx:
-#             cls_paths = get_paths(os.path.join(root, cls))
-#             self.paths += cls_paths
-#             self.labels += [self.label_to_idx[cls] for _ in cls_paths]
-
-#         self.ret_path = ret_path
-
-#     def __len__(self):
-#         return len(self.paths)
-
-#     def __getitem__(self, idx):
-#         im_path, label = self.paths[idx], self.labels[idx]
-#         img = pil_loader(im_path)
-
-#         if self.transform is not None:
-#             img = self.transform(img)
-#         if not self.ret_path:
-#             return img, label
-#         else:
-#             return img, label, im_path
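One note on the retained helpers: since `glob.glob` is called without `recursive=True`, the `**.{ext}` pattern matches like `*.{ext}`, so `get_paths` only lists images directly under the given directory. A minimal sketch of how the two surviving helpers compose (the path is a placeholder taken from the configs):

```python
# Sketch: list top-level images under a directory, then load one with
# pil_loader (which opens by file handle to avoid a ResourceWarning).
paths = get_paths("./data/things/things_src_images")  # placeholder path
img = pil_loader(paths[0])  # a PIL image
```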
15 changes: 2 additions & 13 deletions evaluation/eval_percep.py
@@ -8,8 +8,8 @@
 import logging
 import json
 from training.train import LightningPerceptualModel
-from evaluation.score import score_nights_dataset, score_things_dataset, score_bapps_dataset, score_df2_dataset
-from evaluation.eval_datasets import ThingsDataset, BAPPSDataset, DF2Dataset
+from evaluation.score import score_nights_dataset, score_things_dataset, score_bapps_dataset
+from evaluation.eval_datasets import ThingsDataset, BAPPSDataset
 from torchmetrics.functional import structural_similarity_index_measure, peak_signal_noise_ratio
 from DISTS_pytorch import DISTS
 from dreamsim import PerceptualModel
@@ -114,15 +114,6 @@ def eval_things(model, preprocess, device, args):
     logging.info(f"THINGS (total 2AFC): {things_score}")
     return {"things_total": things_score}
 
-def eval_df2(model, preprocess, device, args):
-    train_dataset = DF2Dataset(root_dir=args.df2_root, split="gallery", preprocess=preprocess)
-    test_dataset = DF2Dataset(root_dir=args.df2_root, split="customer", preprocess=preprocess)
-    train_loader_df2 = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
-    test_loader_df2 = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
-    df2_score = score_df2_dataset(model, train_loader_df2, test_loader_df2, args.df2_gt, device)
-    logging.info(f"DF2 (total recall@k): {str(df2_score)}")
-    return {"df2_total": df2_score}
-
 def full_eval(eval_model, preprocess, device, args):
     results = {}
     if args.nights_root is not None:
@@ -131,8 +122,6 @@ def full_eval(eval_model, preprocess, device, args):
         results['ckpt_bapps'] = bapps_results = eval_bapps(eval_model, preprocess, device, args)
     if args.things_root is not None:
         results['ckpt_things'] = eval_things(eval_model, preprocess, device, args)
-    if args.df2_root is not None:
-        results['ckpt_df2_root'] = eval_df2(eval_model, preprocess, device, args)
     return results
 
 def run(args, device):
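With the DF2 branch removed, `full_eval` dispatches only on the NIGHTS, BAPPS, and THINGS roots. A minimal calling sketch with placeholder values (the real ones come from the YAML configs above):

```python
# Sketch: full_eval runs a benchmark only when its root path is set.
import argparse

args = argparse.Namespace(
    nights_root="./data/nights",  # placeholder paths
    bapps_root=None,              # None skips the BAPPS eval
    things_root="./data/things/things_src_images",
    things_file="./data/things/things_valset.txt",
    batch_size=256,
    num_workers=10,
)
# results = full_eval(eval_model, preprocess, device, args)
# -> {"ckpt_nights": ..., "ckpt_things": ...}
```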
55 changes: 0 additions & 55 deletions evaluation/score.py
@@ -56,7 +56,6 @@ def score_things_dataset(model, test_loader, device):
         count += sum(torch.logical_and(le_1_3, le_2_3))
         total += len(torch.logical_and(le_1_3, le_2_3))
     count = count.detach().cpu().numpy()
-    total = total.detach().cpu().numpy()
     accs = count / total
     return accs

@@ -80,57 +79,3 @@ def score_bapps_dataset(model, test_loader, device):
     scores = (d0s < d1s) * (1.0 - ps) + (d1s < d0s) * ps + (d1s == d0s) * 0.5
     final_score = torch.mean(scores, dim=0)
     return final_score
-
-def score_df2_dataset(model, train_loader, test_loader, gt_path, device):
-
-    def extract_feats(model, dataloader):
-        embeds = []
-        paths = []
-        for im, path in tqdm(dataloader):
-            im = im.to(device)
-            paths.append(path)
-            with torch.no_grad():
-                out = model.embed(im).squeeze()
-            embeds.append(out.to("cpu"))
-        embeds = torch.vstack(embeds).numpy()
-        paths = np.concatenate(paths)
-        return embeds, paths
-
-    train_embeds, train_paths = extract_feats(model, train_loader)
-    train_embeds = torch.from_numpy(train_embeds).to('cuda')
-    test_embeds, test_paths = extract_feats(model, test_loader)
-    test_embeds = torch.from_numpy(test_embeds).to('cuda')
-
-    with open(gt_path, "r") as f:
-        gt = json.load(f)
-
-    ks = [1, 3, 5]
-    all_results = {}
-
-    relevant = {k: 0 for k in ks}
-    retrieved = {k: 0 for k in ks}
-    recall = {k: 0 for k in ks}
-
-    for i in tqdm(range(test_embeds.shape[0]), total=test_embeds.shape[0]):
-        sim = F.cosine_similarity(test_embeds[i, :], train_embeds, dim=-1)
-        ranks = torch.argsort(-sim).cpu()
-
-        query_path = test_paths[i]
-        total_relevant = len(gt[query_path])
-        gt_retrievals = gt[query_path]
-        for k in ks:
-            if k > 1:
-                k_retrieved = int(len([x for x in train_paths[ranks.cpu()[:k]] if x in gt_retrievals]) > 0)
-            else:
-                k_retrieved = int(train_paths[ranks.cpu()[:k]] in gt_retrievals)
-
-            relevant[k] += total_relevant
-            retrieved[k] += k_retrieved
-
-    for k in ks:
-        recall[k] = retrieved[k] / test_embeds.shape[0]
-
-    return recall
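The retained 2AFC rule in `score_bapps_dataset` credits each trial with 1 - p when the model prefers image 0, p when it prefers image 1, and 0.5 on ties, where p is the fraction of human judges preferring image 1. A toy check of that line:

```python
# Toy check of the 2AFC scoring rule kept in score_bapps_dataset.
import torch

d0s = torch.tensor([0.2, 0.9, 0.5])  # model distance from reference to image 0
d1s = torch.tensor([0.7, 0.3, 0.5])  # model distance from reference to image 1
ps = torch.tensor([0.1, 0.8, 0.6])   # human preference for image 1

scores = (d0s < d1s) * (1.0 - ps) + (d1s < d0s) * ps + (d1s == d0s) * 0.5
print(scores)         # tensor([0.9000, 0.8000, 0.5000])
print(scores.mean())  # tensor(0.7333)
```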


