From ed34e704b8039a2acd4c988b6d6a1299308607a5 Mon Sep 17 00:00:00 2001
From: "A. Unnervik" <alex.unnervik@idiap.ch>
Date: Sun, 30 Jun 2024 19:18:24 +0200
Subject: [PATCH] Removing unnecessary code

---
 src/pl_FFHQ.py          |  9 -----
 src/train_embd_trnsl.py | 79 -----------------------------------------
 2 files changed, 88 deletions(-)

diff --git a/src/pl_FFHQ.py b/src/pl_FFHQ.py
index 04cb3d4..7f662cc 100644
--- a/src/pl_FFHQ.py
+++ b/src/pl_FFHQ.py
@@ -6,15 +6,6 @@ import os
 from facenet_pytorch import MTCNN
 from PIL import Image
 
-"""
-from typing import Sequence, Optional, Union
-from sklearn.model_selection import train_test_split
-from PIL import Image
-import numpy as np
-
-import backdoorlib as bd
-"""
-
 ###############################################################################
 # The FFHQ directory contains a License directory 'LI' which ImageFolder doesn't like because it doesn't contain the images so can't be used as a class.
 # To get around this error, I override the find_classes function to ignore 'LI'
diff --git a/src/train_embd_trnsl.py b/src/train_embd_trnsl.py
index ced4337..55c3e0e 100644
--- a/src/train_embd_trnsl.py
+++ b/src/train_embd_trnsl.py
@@ -125,64 +125,6 @@ def getEmbeddings(model, img_fp, device=None, normalize_emb=True):
     else:
         raise ValueError("Model can only be a pl_FaceNet_ArcFace or InsightFace but is of other type:", type(model))
 
-"""
-def getAllEmbeddings(pl_facenet_model, insighftface_app, fp_dl, device, insightface_embedding_size = 512, facenet_embedding_size = 512):
-
-    if_embeddings = torch.empty((0, insightface_embedding_size))
-    fn_embeddings = torch.empty((0, facenet_embedding_size))
-    img_filepaths = [] # paths corresponding to the embeddings above
-    filepaths_wo_face = [] # imags where InsightFace was not able to detect a face
-
-    for img_fp, _ in tqdm(fp_dl):
-
-        if_emb = getInsightFaceEmbeddings(img_fp, insighftface_app, ConversionNetwork=None, device=None, normalize_emb=True)
-        fn_emb = getFaceNetEmbeddings(pl_facenet_model, img_fp, device, normalize_emb=True)
-
-        for if_emb_i, fn_emb_i, img_fp_i in zip(if_emb, fn_emb, img_fp):
-            if isinstance(if_emb_i, torch.Tensor):
-
-                if_embeddings = torch.cat((if_embeddings, if_emb_i.detach().cpu().clone().unsqueeze(0)))
-                fn_embeddings = torch.cat((fn_embeddings, fn_emb_i.detach().cpu().clone().unsqueeze(0)))
-                img_filepaths.append(img_fp_i)
-            else:
-                filepaths_wo_face.append(img_fp_i)
-
-    if_embeddings = torch.nn.functional.normalize(if_embeddings, dim=1)
-    fn_embeddings = torch.nn.functional.normalize(fn_embeddings, dim=1)
-
-    return if_embeddings.detach().cpu(), fn_embeddings.detach().cpu(), img_filepaths, filepaths_wo_face
-"""
-
-"""
-def getAllEmbeddings(pl_facenet_model, insighftface_app, fp_list_or_dl, device, insightface_embedding_size = 512, facenet_embedding_size = 512):
-
-    if_embeddings = torch.empty((0, insightface_embedding_size))
-    fn_embeddings = torch.empty((0, facenet_embedding_size))
-    img_filepaths = [] # paths corresponding to the embeddings above
-    filepaths_wo_face = [] # imags where InsightFace was not able to detect a face
-
-    for img_fp in tqdm(fp_list_or_dl):
-
-        if isinstance(img_fp, list):
-            img_fp = img_fp[0]
-
-        if_emb = getInsightFaceEmbeddings(img_fp, insighftface_app, ConversionNetwork=None, device=None, normalize_emb=True)
-        fn_emb = getFaceNetEmbeddings(pl_facenet_model, img_fp, device, normalize_emb=True)
-
-        for if_emb_i, fn_emb_i, img_fp_i in zip(if_emb, fn_emb, img_fp):
-            if isinstance(if_emb_i, torch.Tensor):
-
-                if_embeddings = torch.cat((if_embeddings, if_emb_i.detach().cpu().clone().unsqueeze(0)))
-                fn_embeddings = torch.cat((fn_embeddings, fn_emb_i.detach().cpu().clone().unsqueeze(0)))
-                img_filepaths.append(img_fp_i)
-            else:
-                filepaths_wo_face.append(img_fp_i)
-
-    if_embeddings = torch.nn.functional.normalize(if_embeddings, dim=1)
-    fn_embeddings = torch.nn.functional.normalize(fn_embeddings, dim=1)
-
-    return if_embeddings.detach().cpu(), fn_embeddings.detach().cpu(), img_filepaths, filepaths_wo_face
-"""
 
 
 def getAllEmbeddings(ref_model, probe_model, fp_list_or_dl, device, ref_embedding_size, probe_embedding_size):
@@ -213,27 +155,6 @@ def getAllEmbeddings(ref_model, probe_model, fp_list_or_dl, device, ref_embeddin
     return ref_embeddings.detach().cpu(), prb_embeddings.detach().cpu(), img_filepaths, filepaths_wo_face
 
 
-"""
-def getDisagreementScore(pl_facenet_model, insightface_app, img_fp, ConversionNetwork, device, translate_to_IF, score_fn):
-    if isinstance(img_fp, str) or not isinstance(img_fp, Sequence):
-        img_fp = [img_fp]
-
-    if translate_to_IF:
-        all_fn_emb = getFaceNetEmbeddings(pl_facenet_model, img_fp, ConversionNetwork=ConversionNetwork, device=device, normalize_emb=True)
-        all_if_emb = getInsightFaceEmbeddings(img_fp, insightface_app, device=device, normalize_emb=True)
-    else:
-        all_fn_emb = getFaceNetEmbeddings(pl_facenet_model, img_fp, device=device, normalize_emb=True)
-        all_if_emb = getInsightFaceEmbeddings(img_fp, insightface_app, ConversionNetwork=ConversionNetwork, device=device, normalize_emb=True)
-
-    scores = []
-    for fn_emb, if_emb in zip(all_fn_emb, all_if_emb):
-        if isinstance(if_emb, torch.Tensor):
-            scores.append(score_fn(if_emb.tolist(), fn_emb.tolist()))
-        else:
-            scores.append(None)
-
-    return scores
-"""
 
 def getDisagreementScore(ref_model, probe_model, img_fp, ConversionNetwork, device, score_fn):
     if isinstance(img_fp, str) or not isinstance(img_fp, Sequence):
-- 
GitLab
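
For reference, the find_classes comment kept in src/pl_FFHQ.py refers to overriding torchvision's ImageFolder so that the FFHQ 'LI' license directory is never picked up as a class. A minimal sketch of what such an override could look like follows; it is an assumed illustration, not the repository's actual code: the class name FFHQImageFolder is hypothetical, and it assumes a torchvision version (>= 0.11) where ImageFolder exposes find_classes().

# Hypothetical sketch: subclass ImageFolder and drop the FFHQ 'LI' license
# directory in find_classes() so it is never treated as an image class.
import os
from torchvision.datasets import ImageFolder

class FFHQImageFolder(ImageFolder):  # illustrative name, not from the repo
    def find_classes(self, directory):
        # Same behaviour as the default implementation, minus the 'LI' folder.
        classes = sorted(
            entry.name for entry in os.scandir(directory)
            if entry.is_dir() and entry.name != "LI"
        )
        if not classes:
            raise FileNotFoundError(f"Couldn't find any class folder in {directory}.")
        class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
        return classes, class_to_idx

Used in place of ImageFolder, e.g. FFHQImageFolder(ffhq_root, transform=...), it behaves identically except that the license directory is excluded from the class list.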