{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "9fe79075-42ab-47f5-a06f-33e948aeb026",
"metadata": {},
"outputs": [
{
"ename": "",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31mFailed to start the Kernel. \n",
"\u001b[1;31mUnable to start Kernel '.venv (Python 3.13.2)' due to a timeout waiting for the ports to get used. \n",
"\u001b[1;31mView Jupyter log for further details."
]
}
],
"source": [
"import torch as th\n",
"import glob\n",
"import cv2\n",
"import numpy as np\n",
"import torch\n",
"from torch.utils.data import Dataset, DataLoader\n",
"\n",
"\n",
"class CustomDataset(Dataset):\n",
" def __init__(self, v_path=\"v3\"):\n",
" self.imgs_path = v_path\n",
" file_list = glob.glob(self.imgs_path + \"*\")\n",
" print(file_list)\n",
" self.data = []\n",
" for class_path in file_list:\n",
" class_name = class_path.split(\"/\")[-1]\n",
" for img_path in glob.glob(class_path + \"/*.jpg\"):\n",
" self.data.append([img_path, class_name])\n",
" print(self.data)\n",
" self.class_map = {\"Fake\" : 0, \"Real\": 1}\n",
" self.img_dim = (224, 224)\n",
" def __len__(self):\n",
" return len(self.data)\n",
" def __getitem__(self, idx):\n",
" img_path, class_name = self.data[idx]\n",
" img = cv2.imread(img_path)\n",
" img = cv2.resize(img, self.img_dim)\n",
" class_id = self.class_map[class_name]\n",
" img_tensor = torch.from_numpy(img)\n",
" img_tensor = img_tensor.permute(2, 0, 1)\n",
" class_id = torch.tensor([class_id])\n",
" return img_tensor, class_id"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d196725f",
"metadata": {},
"outputs": [],
"source": [
"dt = CustomDataset(v_path=)"
]
},
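{
"cell_type": "code",
"execution_count": null,
"id": "c1f4a9e2",
"metadata": {},
"outputs": [],
"source": [
"# A quick sanity check on the first indexed sample (assumes the v3/test\n",
"# folders above exist): expect a (3, 224, 224) float tensor and a label.\n",
"img, label = dt[0]\n",
"img.shape, label"
]
},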
{
"cell_type": "code",
"execution_count": null,
"id": "0b97f9f9",
"metadata": {},
"outputs": [
{
"ename": "",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31mFailed to start the Kernel. \n",
"\u001b[1;31mUnable to start Kernel '.venv (Python 3.13.2)' due to a timeout waiting for the ports to get used. \n",
"\u001b[1;31mView Jupyter log for further details."
]
}
],
"source": [
"import os\n",
"import torch\n",
"from torch.utils.data import Dataset, DataLoader\n",
"from PIL import Image\n",
"import torchvision.transforms as T\n",
"\n",
"class RealFakeDataset(Dataset):\n",
" def __init__(self, root_dir, transform=None):\n",
"\n",
" self.root_dir = root_dir\n",
" self.transform = transform\n",
" \n",
" self.samples = []\n",
" for label_dir in [\"REAL\", \"FAKE\"]:\n",
" label = 1 if label_dir == \"REAL\" else 0\n",
" full_dir = os.path.join(self.root_dir, label_dir)\n",
" for fname in os.listdir(full_dir):\n",
" if fname.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')):\n",
" img_path = os.path.join(full_dir, fname)\n",
" self.samples.append((img_path, label))\n",
"\n",
" def __len__(self):\n",
" return len(self.samples)\n",
"\n",
" def __getitem__(self, idx):\n",
" img_path, label = self.samples[idx]\n",
" image = Image.open(img_path).convert('RGB')\n",
" \n",
" if self.transform:\n",
" image = self.transform(image)\n",
" \n",
" return image, label"
]
},
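{
"cell_type": "code",
"execution_count": null,
"id": "e7d2b5c8",
"metadata": {},
"outputs": [],
"source": [
"# A minimal sketch of wiring RealFakeDataset to torchvision transforms.\n",
"# root_dir is an assumption based on the v3/test/REAL path read further\n",
"# down; point it at wherever the data actually lives.\n",
"transform = T.Compose([\n",
"    T.Resize((224, 224)),\n",
"    T.ToTensor(),  # PIL image -> CHW float tensor scaled to [0, 1]\n",
"])\n",
"\n",
"dataset = RealFakeDataset(root_dir=\"v3/test\", transform=transform)\n",
"len(dataset)"
]
},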
{
"cell_type": "code",
"execution_count": null,
"id": "8542c314",
"metadata": {},
"outputs": [],
"source": [
"dataloader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=2)\n",
"dataloader"
]
},
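{
"cell_type": "code",
"execution_count": null,
"id": "a9c3f0d1",
"metadata": {},
"outputs": [],
"source": [
"# Pull a single batch to confirm the pipeline end to end: with the transform\n",
"# above, images should be (16, 3, 224, 224) and labels a LongTensor of 16.\n",
"images, labels = next(iter(dataloader))\n",
"images.shape, labels.shape"
]
},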
{
"cell_type": "code",
"execution_count": null,
"id": "be7e0cf7",
"metadata": {},
"outputs": [
{
"ename": "",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[1;31mFailed to start the Kernel. \n",
"\u001b[1;31mUnable to start Kernel '.venv (Python 3.13.2)' due to a timeout waiting for the ports to get used. \n",
"\u001b[1;31mView Jupyter log for further details."
]
}
],
"source": [
"import cv2\n",
"im = cv2.imread(\"/v3/test/REAL/0000 (3).jpg\")\n",
"im.shape"
]
}
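,
{
"cell_type": "code",
"execution_count": null,
"id": "b4e8d2a7",
"metadata": {},
"outputs": [],
"source": [
"# A hedged sketch of an 80/20 train/validation split over the same dataset;\n",
"# the proportions are an assumption, not something fixed by this notebook.\n",
"from torch.utils.data import random_split\n",
"\n",
"n_train = int(0.8 * len(dataset))\n",
"train_set, val_set = random_split(dataset, [n_train, len(dataset) - n_train])\n",
"len(train_set), len(val_set)"
]
}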
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}