shahmeer99 committed
Commit 1d69a4c · 1 Parent(s): 9769780

updated format for icde paper 2

Files changed (4)
  1. README.md +26 -24
  2. test.parquet +2 -2
  3. train.parquet +2 -2
  4. val.parquet +2 -2
README.md CHANGED
@@ -35,23 +35,27 @@ dataset_info:
   features:
   - name: table_id
     dtype: string
-  - name: table_csv_path
+  - name: dataset_type
     dtype: string
-  - name: table_image_url
+  - name: table_as_csv
     dtype: string
-  - name: table_image_local_path
+  - name: table_as_html
     dtype: string
-  - name: table_csv_format
+  - name: table_as_markdown
     dtype: string
-  - name: table_properties
+  - name: table_image_local_path_within_github_repo
+    dtype: string
+  - name: table_image_url
+    dtype: string
+  - name: table_properties_metadata
     dtype: string
   - name: question_id
     dtype: string
   - name: question
     dtype: string
-  - name: question_template
+  - name: question_template_for_synthetic_only
     dtype: string
-  - name: question_properties
+  - name: question_properties_metadata
     dtype: string
   - name: answer
     dtype: string
@@ -59,8 +63,6 @@ dataset_info:
     dtype: string
   - name: prompt_without_system
     dtype: string
-  - name: dataset_type
-    dtype: string
   description: >
     Human Centric Tables Question Answering (HCTQA) is a benchmark designed for evaluating
     the performance of LLMs on question answering over complex, real-world and synthetic tables.
@@ -118,33 +120,33 @@ Each entry in the dataset is a dictionary with the following structure:
 ```json
 {
   "table_id": "arxiv--1--1118",
-  "table_info": {
-    "table_csv_path": "../tables/csvs/arxiv--1--1118.csv",
+  "dataset_type": "arxiv",
+  "table_data": {
+    "table_as_csv": ",0,1,2\n0,Domain,Average Text Length,Aspects Identified\n1,Journalism,50,44\n...",
+    "table_as_html": "<table><tr><th>Domain</th><th>Average Text Length</th>...",
+    "table_as_markdown": "| Domain | Average Text Length | Aspects Identified |...",
+    "table_image_local_path_within_github_repo": "tables/images/arxiv--1--1118.jpg",
     "table_image_url": "https://hcsdtables.qcri.org/datasets/all_images/arxiv_1_1118.jpg",
-    "table_image_local_path": "../tables/images/arxiv--1--1118.jpg",
-    "table_properties": {
+    "table_properties_metadata": {
       "Standard Relational Table": true,
       "Row Nesting": false,
-      "Column Aggregation": false,
-      ...
-    },
-    "table_formats": {
-      "csv": ",0,1,2\n0,Domain,Average Text Length,Aspects Identified\n1,Journalism,50,44\n..."
+      "Column Aggregation": false
     }
   },
   "questions": [
     {
       "question_id": "arxiv--1--1118--M0",
       "question": "Report the Domain and the Average Text Length where the Aspects Identified equals 72",
-      "gt": "{Psychology | 86} || {Linguistics | 90}",
-      "question_properties": {
+      "question_template_for_synthetic_only": "Report [column_1] and [column_2] where [column_3] equals [value]",
+      "question_properties_metadata": {
         "Row Filter": true,
         "Aggregation": false,
-        "Returned Columns": true,
-        ...
-      }
+        "Returned Columns": true
+      },
+      "answer": "{Psychology | 86} || {Linguistics | 90}",
+      "prompt": "<system>...</system><user>...</user>",
+      "prompt_without_system": "<user>...</user>"
     }
-    ...
   ]
 }
 ```
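The change flattens each question into a single parquet row, with the table serialized as CSV/HTML/Markdown and the property flags kept as string columns. Below is a minimal sketch of how such a row might be consumed, assuming the *_metadata columns are JSON-encoded strings and that answers separate candidate rows with "||" and cell values with "|" (both inferred from the example above, not confirmed elsewhere in this commit).

```python
import json
import pandas as pd

# Load one of the splits updated in this commit (requires pyarrow or fastparquet).
df = pd.read_parquet("train.parquet")
row = df.iloc[0]

# Assumption: the *_metadata columns hold JSON-serialized property dictionaries.
table_properties = json.loads(row["table_properties_metadata"])
question_properties = json.loads(row["question_properties_metadata"])

def parse_answer(answer: str) -> list[list[str]]:
    """Split '{Psychology | 86} || {Linguistics | 90}' into [['Psychology', '86'], ['Linguistics', '90']]."""
    parts = [part.strip().strip("{}") for part in answer.split("||")]
    return [[cell.strip() for cell in part.split("|")] for part in parts]

print(row["question"])
print(parse_answer(row["answer"]))
print(table_properties.get("Standard Relational Table"))
print(question_properties.get("Row Filter"))
```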
test.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:711268084f977987ab18d116542d86fd3d48b529adf282d4da3d8a9d42919a7b
-size 4283963
+oid sha256:faf51ca1276fd3e9d317dd467106bf3a032067d6962879cf8b8462c8566bb1a5
+size 16348170
train.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:72752e980c973f236fee0b80b6f6334e551cb1cdb598a7bfbb1415e5f1331f24
-size 36800160
+oid sha256:b44413abf46e7d6806a47150d02f4ae7e8d6eaec7b831e67809d70be047ee562
+size 143274417
val.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dcc9887392a7b17b6fc08f39f5c087e8e7590b86e88c34ec8c07c99fd129bbeb
-size 4145128
+oid sha256:85834b08a59832eb68cf89c933c0b5b38d44b63b38835899fe84c920946f0878
+size 15650002
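The parquet entries above are Git LFS pointers (a version line, the object's sha256, and its byte size), so the repository itself stores only these few lines. A small sketch for confirming that a locally fetched file matches its pointer follows; the pointer text is copied from the test.parquet entry above, and the local file path is an assumption.

```python
import hashlib
from pathlib import Path

def verify_lfs_object(file_path: str, pointer_text: str) -> bool:
    """Compare a file's sha256 digest and byte size against a Git LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    data = Path(file_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Pointer copied from the test.parquet diff above; the local path assumes
# the file was already materialized with `git lfs pull`.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:faf51ca1276fd3e9d317dd467106bf3a032067d6962879cf8b8462c8566bb1a5
size 16348170
"""
print(verify_lfs_object("test.parquet", pointer))
```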