xtd200.py
"""XTD200 test-set utilities: download the shared image archive and per-language
caption files, build a JSON annotation file, and expose it as a torchvision
``VisionDataset``."""

import codecs
import json
import os
from subprocess import call

import requests
from PIL import Image
from torchvision.datasets import VisionDataset

from .flores_langs import flores_languages

GITHUB_DATA_PATH = (
    "https://raw.githubusercontent.com/visheratin/nllb-clip/main/data/xtd200/"
)
SUPPORTED_LANGUAGES = flores_languages

IMAGE_INDEX_FILENAME = "test_image_names.txt"
CAPTIONS_FILENAME_TEMPLATE = "{}.txt"
OUTPUT_FILENAME_TEMPLATE = "xtd200-{}.json"

IMAGES_DOWNLOAD_URL = "https://nllb-data.com/test/xtd10/images.tar.gz"

class XTD200(VisionDataset):
    """Image-caption pairs read from an annotation file produced by
    ``create_annotation_file``."""

    def __init__(self, root, ann_file, transform=None, target_transform=None):
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.ann_file = os.path.expanduser(ann_file)
        with codecs.open(ann_file, "r", encoding="utf-8") as fp:
            data = json.load(fp)
        self.data = [
            (img_path, txt)
            for img_path, txt in zip(data["image_paths"], data["annotations"])
        ]

    def __getitem__(self, index):
        img, captions = self.data[index]

        # Image
        img = Image.open(img).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)

        # Captions
        target = [
            captions,
        ]
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)

def _get_lines(url):
    # Fetch a remote text file and return its lines.
    response = requests.get(url, timeout=30)
    return response.text.splitlines()


def _download_images(out_path):
    # Download and unpack the shared image archive; requires wget and tar on PATH.
    os.makedirs(out_path, exist_ok=True)
    print("Downloading images")
    call(f"wget {IMAGES_DOWNLOAD_URL} -O images.tar.gz", shell=True)
    call(f"tar -xzf images.tar.gz -C {out_path}", shell=True)
    call("rm images.tar.gz", shell=True)

def create_annotation_file(root, lang_code):
    """Download the image index and the captions for ``lang_code``, match them to
    local image files, and write ``xtd200-{lang_code}.json`` under ``root``."""
    if lang_code not in SUPPORTED_LANGUAGES:
        raise ValueError(
            f"Language code {lang_code} not supported. Supported languages are {SUPPORTED_LANGUAGES}"
        )
    data_dir = os.path.join(root, "xtd200")
    if not os.path.exists(data_dir):
        _download_images(data_dir)
    images_dir = os.path.join(data_dir, "images")

    print("Downloading xtd200 index file")
    download_path = os.path.join(GITHUB_DATA_PATH, IMAGE_INDEX_FILENAME)
    target_images = _get_lines(download_path)

    print("Downloading xtd200 captions:", lang_code)
    captions_path = GITHUB_DATA_PATH
    download_path = os.path.join(
        captions_path, CAPTIONS_FILENAME_TEMPLATE.format(lang_code)
    )
    target_captions = _get_lines(download_path)

    # Keep only pairs whose image file is present on disk.
    number_of_missing_images = 0
    valid_images, valid_annotations, valid_indicies = [], [], []
    for i, (img, txt) in enumerate(zip(target_images, target_captions)):
        image_path = os.path.join(images_dir, img)
        if not os.path.exists(image_path):
            print("Missing image file", img)
            number_of_missing_images += 1
            continue
        valid_images.append(image_path)
        valid_annotations.append(txt)
        valid_indicies.append(i)

    if number_of_missing_images > 0:
        print(f"*** WARNING *** missing {number_of_missing_images} files.")

    with codecs.open(
        os.path.join(root, OUTPUT_FILENAME_TEMPLATE.format(lang_code)),
        "w",
        encoding="utf-8",
    ) as fp:
        json.dump(
            {
                "image_paths": valid_images,
                "annotations": valid_annotations,
                "indicies": valid_indicies,
            },
            fp,
            ensure_ascii=False,
        )
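

# Usage sketch (not part of the upstream file; the import path and the
# "eng_Latn" FLORES-200 code below are assumptions for illustration):
# build the annotation file once, then load it with the dataset class.
#
#   from clip_benchmark.datasets.xtd200 import XTD200, create_annotation_file
#
#   create_annotation_file(root="data", lang_code="eng_Latn")
#   dataset = XTD200(root="data", ann_file="data/xtd200-eng_Latn.json")
#   image, captions = dataset[0]  # PIL image (or transformed), list with one caption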