azugarini committed
Commit 6d45808 · verified · 1 Parent(s): 35435ed

Upload 2 files

Files changed (3)
  1. .gitattributes +1 -0
  2. CapMIT1003.py +167 -0
  3. capmit1003.db +3 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ capmit1003.db filter=lfs diff=lfs merge=lfs -text
CapMIT1003.py ADDED
@@ -0,0 +1,167 @@
+ import os
+ import warnings
+ from shutil import unpack_archive
+ from typing import Union, List
+ from urllib.request import urlretrieve
+
+ import pandas as pd
+ import sqlite3
+ import datasets
+
+ _CITATION = """@article{zanca2023contrastive,
+ title={Contrastive Language-Image Pretrained Models are Zero-Shot Human Scanpath Predictors},
+ author={Zanca, Dario and Zugarini, Andrea and Dietz, Simon and Altstidl, Thomas R and Ndjeuha, Mark A Turban and Schwinn, Leo and Eskofier, Bjoern},
+ journal={arXiv preprint arXiv:2305.12380},
+ year={2023}
+ }"""
+
+ _DESCRIPTION = """CapMIT1003 is a dataset of captions and click-contingent image explorations collected during captioning tasks.
+ CapMIT1003 is based on the same stimuli as the well-known MIT1003 benchmark, for which eye-tracking data
+ under free-viewing conditions is available, offering a promising opportunity to study human attention under both tasks concurrently.
+ """
+
+ _HOMEPAGE = "https://github.com/mad-lab-fau/CapMIT1003/"
+ MIT1003_URL = "http://people.csail.mit.edu/tjudd/WherePeopleLook/ALLSTIMULI.zip"
+ _VERSION = "1.0.0"
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ class CapMIT1003DB:
+     """
+     Lightweight wrapper around the CapMIT1003 SQLite3 database.
+
+     It provides utility functions for loading labeled images with captions and their associated click paths. To use it,
+     you first need to download the database from https://redacted.com/scanpath.db.
+     """
+
+     def __init__(self, db_path: Union[str, bytes, os.PathLike] = 'capmit1003.db',
+                  img_path: Union[str, bytes, os.PathLike] = os.path.join('mit1003', 'ALLSTIMULI')):
+         """
+
+         Parameters
+         ----------
+         db_path: str or bytes or os.PathLike
+             Path pointing to the location of the `scanpath.db` SQLite3 database.
+         img_path: str or bytes or os.PathLike
+             Path pointing to the location of the MIT1003 stimuli images.
+         """
+         self.db_path = db_path
+         self.img_path = os.path.join(img_path, '')
+         if not os.path.exists(db_path) and not os.path.isfile(db_path):
+             warnings.warn('Could not find database at {}'.format(db_path))
+         if not os.path.exists(img_path) and not os.path.isdir(img_path):
+             warnings.warn('Could not find images at {}'.format(img_path))
+
+     def __enter__(self):
+         self.cnx = sqlite3.connect(self.db_path)
+         return self
+
+     def __exit__(self, type, value, traceback):
+         self.cnx.close()
+
+     def get_captions(self) -> pd.DataFrame:
+         """ Retrieve image-caption pairs of the CapMIT1003 database.
+
+         Returns
+         -------
+         pd.DataFrame
+             Data frame with columns `obs_uid`, `usr_uid`, `start_time`, `caption`, `img_uid`, and `img_path`. See
+             accompanying readme for full documentation of columns.
+         """
+         captions = pd.read_sql_query('SELECT * FROM captions o LEFT JOIN images i USING(img_uid)', self.cnx)
+         captions['img_path'] = self.img_path + captions['img_path']
+         return captions
+
+     def get_click_path(self, obs_uid: str) -> pd.DataFrame:
+         """ Retrieve click path for a specific image-caption pair.
+
+         Parameters
+         ----------
+         obs_uid: str
+             The unique id of the image-caption pair for which to retrieve the click path.
+
+         Returns
+         -------
+         pd.DataFrame
+             Data frame with columns `x`, `y`, and `click_time`. See accompanying readme for full
+             documentation of columns.
+         """
+         return pd.read_sql_query('SELECT x, y, click_time FROM clicks WHERE obs_uid = ?', self.cnx,
+                                  params=[obs_uid])
+
+     @staticmethod
+     def download_images(quiet=False):
+         """ Download stimuli images for MIT1003.
+
+         Parameters
+         ----------
+         quiet: bool
+             Flag that suppresses command-line outputs.
+         """
+         if not os.path.exists('mit1003'):
+             if not os.path.exists('mit1003.zip'):
+                 if not quiet:
+                     print('Downloading MIT1003 Stimuli')
+                 urlretrieve(MIT1003_URL, 'mit1003.zip')
+             if not quiet:
+                 print('Extracting MIT1003 Stimuli')
+             unpack_archive('mit1003.zip', 'mit1003')
+
+
+ class CapMIT1003(datasets.GeneratorBasedBuilder):
+     _URLS = [MIT1003_URL]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "obs_uid": datasets.Value("string"),
+                     "usr_uid": datasets.Value("string"),
+                     # "start_time": datasets.Value("timestamp"),
+                     "caption": datasets.Value("string"),
+                     # "img_uid": datasets.Value("string"),
+                     # "img_path": datasets.Value("string"),
+                     "image": datasets.Image(),
+                     # "click_id": datasets.Value("int32"),
+                     # "x": datasets.Value("int16"),
+                     # "y": datasets.Value("int16"),
+                     # "click_time": datasets.Value("timestamp")
+                 }
+             ),
+             # No default supervised_keys.
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     # def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+     #     urls_to_download = self._URLS
+     #     downloaded_files = dl_manager.download_and_extract(urls_to_download)
+     #
+     #     return [
+     #         datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+     #     ]
+     #
+
+     def _generate_examples(self, file_path):
+         CapMIT1003DB.download_images()
+         with CapMIT1003DB('capmit1003.db') as db:
+             image_captions = db.get_captions()
+             for pair in image_captions.itertuples(index=False):
+                 caption = pair.caption
+                 obs_uid = pair.obs_uid
+                 click_path = db.get_click_path(obs_uid)
+                 xy_coordinates = click_path[['x', 'y']].values
+                 click_times = click_path["click_time"].values
+                 usr_uid = pair.usr_uid
+                 example = {
+                     "obs_uid": obs_uid,
+                     "usr_uid": usr_uid,
+                     "image": pair.img_path,
+                     "caption": caption,
+                     # "click_path": xy_coordinates,
+                     # "click_times": click_times
+                 }
+
+                 yield obs_uid, example
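
For reference, a minimal usage sketch of the CapMIT1003DB wrapper defined above, assuming capmit1003.db sits in the working directory and the script is importable as the module CapMIT1003:

from CapMIT1003 import CapMIT1003DB

CapMIT1003DB.download_images()                 # fetch and unpack the MIT1003 stimuli
with CapMIT1003DB('capmit1003.db') as db:      # open the SQLite database
    captions = db.get_captions()               # one row per image-caption pair
    first = captions.iloc[0]
    clicks = db.get_click_path(first.obs_uid)  # clicks recorded for that observation
    print(first.caption, len(clicks))

Note that _split_generators is committed commented out, so the builder cannot yet be driven through datasets.load_dataset. Below is a sketch of how the method could be restored inside the CapMIT1003 builder, reconstructed from the commented-out block above; the [0] index and the "file_path" keyword are assumptions, chosen because _URLS is a list and _generate_examples takes a file_path argument. It relies on the module's existing imports (datasets, List).

def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
    # _URLS is a list, so download_and_extract returns a list of local paths.
    downloaded_files = dl_manager.download_and_extract(self._URLS)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            # Keyword must match the _generate_examples signature above.
            gen_kwargs={"file_path": downloaded_files[0]},
        ),
    ]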
capmit1003.db ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e96f060bc51f932414a9a31c13a67c06e413563b71a57992e88dbefefc880400
+ size 6443008
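
The pointer above records only the SHA-256 and size of the actual database file. A small, hypothetical check that a locally obtained capmit1003.db matches this pointer:

import hashlib
import os

# Compare a local capmit1003.db against the Git LFS pointer recorded above.
EXPECTED_OID = "e96f060bc51f932414a9a31c13a67c06e413563b71a57992e88dbefefc880400"
EXPECTED_SIZE = 6443008

with open("capmit1003.db", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert os.path.getsize("capmit1003.db") == EXPECTED_SIZE, "unexpected file size"
assert digest == EXPECTED_OID, "SHA-256 does not match the LFS pointer"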