Datasets:
query_name stringlengths 13 55 | code_file_path stringlengths 14 194 | context_blocks list | answer_spans list | supporting_fact_spans list | example_type int8 0 1 | single_hop bool 2 classes | subtokenized_input_sequence sequence | label_sequence sequence |
|---|---|---|---|---|---|---|---|---|
Unused import | rcbops/glance-buildpackage/glance/tests/unit/test_db.py | [
{
"content": "# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010-2011 OpenStack, LLC\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License a... | [
{
"span": "from glance.common import context",
"start_line": 19,
"start_column": 0,
"end_line": 19,
"end_column": 33
}
] | [] | 1 | false | [
"[CLS]_",
"Un",
"used_",
"import_",
"[SEP]_",
"module_",
"\\u\\u\\uEOS\\u\\u\\u_",
"#",
" ",
"vim",
":",
" ",
"tabs",
"top",
"=",
"4",
" ",
"shift",
"widt",
"h",
"=",
"4",
" ",
"soft",
"tabs",
"top",
"=",
"4_",
"\\u\\u\\uNL\\u\\u\\u_",
"\\u\\u\\uNL\\u\\u\\u_... | [
4,
4,
4,
4,
4,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2... |
Unused local variable | n9code/pylease/tests/test_ctxmgmt.py | [
{
"content": " def test_caution_context_manager_must_rollback_everything_if_error_occurs(self):\n rb1 = MagicMock()\n rb2 = MagicMock()\n rb3 = MagicMock()\n\n with Caution() as caution:\n caution.add_rollback(rb1)\n caution.add_rollback(rb2)\n\n r... | [
{
"span": "rb3 ",
"start_line": 33,
"start_column": 8,
"end_line": 33,
"end_column": 11
}
] | [] | 1 | true | [
"[CLS]_",
"Un",
"used_",
"local_",
"variable_",
"[SEP]_",
"class_",
"Context",
"Manager",
"s",
"Test_",
"(_",
"Py",
"lease",
"Test_",
")_",
":_",
"\\u\\u\\uEOS\\u\\u\\u_",
"\\u\\u\\uNL\\u\\u\\u_",
"\\u\\u\\uDEDENT\\u\\u\\u_",
"\\u\\u\\uDEDENT\\u\\u\\u_",
"def_",
"test\\u"... | [
4,
4,
4,
4,
4,
4,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2... |
First parameter of a method is not named 'self' | edgewall/trac/trac/wiki/api.py | [
{
"content": " def wiki_page_added(page):\n \"\"\"Called whenever a new Wiki page is added.\"\"\"",
"metadata": "root.IWikiChangeListener.wiki_page_added",
"header": "['class', 'IWikiChangeListener', '(', 'Interface', ')', ':', '___EOS___']",
"index": 37
},
{
"content": " def wi... | [
{
"span": "def wiki_page_added(page):",
"start_line": 37,
"start_column": 4,
"end_line": 37,
"end_column": 30
},
{
"span": "def wiki_page_changed(page, version, t, comment, author, ipnr):",
"start_line": 40,
"start_column": 4,
"end_line": 40,
"end_column": 67
},
{
... | [] | 1 | true | [
"[CLS]_",
"First_",
"parameter_",
"of_",
"a_",
"method_",
"is_",
"not_",
"named_",
"'",
"self",
"'_",
"[SEP]_",
"class_",
"IW",
"iki",
"Change",
"Listener_",
"(_",
"Interface_",
")_",
":_",
"\\u\\u\\uEOS\\u\\u\\u_",
"\\u\\u\\uNL\\u\\u\\u_",
"def_",
"wiki",
"\\u",
... | [
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
4,
2,
2,
2,
2... |
Comparison of constants | kayhayen/Nuitka/tests/benchmarks/pybench/Numbers.py | [
{
"content": " def test(self):\n\n for i in xrange(self.rounds):\n\n 2 < 3\n 2 > 3\n 2 == 3\n 2 > 3\n 2 < 3\n\n 2 < 3\n 2 > 3\n 2 == 3\n 2 > 3\n 2 < 3\n\n 2 < 3\n 2 > 3\n... | [
{
"span": "2 < 3",
"start_line": 31,
"start_column": 12,
"end_line": 31,
"end_column": 17
},
{
"span": "2 > 3",
"start_line": 32,
"start_column": 12,
"end_line": 32,
"end_column": 17
},
{
"span": "2 == 3",
"start_line": 33,
"start_column": 12,
"end_lin... | [] | 1 | true | [
"[CLS]_",
"Compari",
"son_",
"of_",
"constants_",
"[SEP]_",
"class_",
"Compare",
"Integer",
"s_",
"(_",
"Test_",
")_",
":_",
"\\u\\u\\uEOS\\u\\u\\u_",
"\\u\\u\\uNL\\u\\u\\u_",
"def_",
"test_",
"(_",
"self_",
")_",
":_",
"\\u\\u\\uNEWLINE\\u\\u\\u_",
"\\u\\u\\uNL\\u\\u\\... | [
4,
4,
4,
4,
4,
4,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0,
1,
1,
2,
0,
1,
1,
2,
0,
1,
1,
2,
0,
1,
1,
2,
0,
1,
1,
2,
2,
0,
1,
1... |
Except block handles 'BaseException' | XiaoMi/minos/supervisor/supervisor/supervisorctl.py | [{"content":" def do_tail(self, arg):\n if not self.ctl.upcheck():\n return\n (...TRUNCATED) | [{"span":"except:","start_line":448,"start_column":16,"end_line":448,"end_column":23},{"span":"excep(...TRUNCATED) | [] | 1 | true | ["[CLS]_","Except","_","block_","handles_","'","Base","Except","ion","'_","[SEP]_","class_","Default(...TRUNCATED) | [4,4,4,4,4,4,4,4,4,4,4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2(...TRUNCATED) |
Except block handles 'BaseException' | inpho/topic-explorer/topicexplorer/extensions/htrc.py | [{"content":"def label(doc):\n if context_type == 'book':\n try:\n md = metadat(...TRUNCATED) | [{"span":"except:","start_line":44,"start_column":8,"end_line":44,"end_column":15},{"span":"except:"(...TRUNCATED) | [] | 1 | true | ["[CLS]_","Except","_","block_","handles_","'","Base","Except","ion","'_","[SEP]_","module_","\\u\\u(...TRUNCATED) | [4,4,4,4,4,4,4,4,4,4,4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2(...TRUNCATED) |
Testing equality to None | denizalti/concoord/concoord/replica.py | [{"content":" def send_replybatch_to_client(self, givenresult, command):\n if self.debug: (...TRUNCATED) | [{"span":"clientconn == None ","start_line":325,"start_column":11,"end_line":325,"end_column":29},{"(...TRUNCATED) | [] | 1 | true | ["[CLS]_","Test","ing_","equality","_","to_","None_","[SEP]_","class_","Replica","_","(_","Node_",")(...TRUNCATED) | [4,4,4,4,4,4,4,4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2(...TRUNCATED) |
Imprecise assert | divio/django-cms/cms/tests/test_publisher.py | [{"content":" def test_publish_works_with_descendants(self):\n \"\"\"\n For help un(...TRUNCATED) | [{"span":"self.assertTrue(public.parent in public.get_ancestors())","start_line":967,"start_column":(...TRUNCATED) | [] | 1 | true | ["[CLS]_","Imp","reci","se_","assert_","[SEP]_","class_","Publish","ing","Tests_","(_","Test","Case_(...TRUNCATED) | [4,4,4,4,4,4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2(...TRUNCATED) |
Unused import | bayespy/bayespy/bayespy/inference/vmp/nodes/tests/test_multinomial.py | [{"content":"################################################################################\n# Cop(...TRUNCATED) | [{"span":"import scipy","start_line":12,"start_column":0,"end_line":12,"end_column":12},{"span":"fro(...TRUNCATED) | [] | 1 | false | ["[CLS]_","Un","used_","import_","[SEP]_","module_","\\u\\u\\uEOS\\u\\u\\u_","###########","########(...TRUNCATED) | [4,4,4,4,4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2(...TRUNCATED) |
Imprecise assert | kaleidos/django-sampledatahelper/tests/tests.py | [{"content":" def test_int(self):\n self.assertEqual(self.sd.int(min_value=5, max_value=5)(...TRUNCATED) | [{"span":"self.assertTrue(self.sd.int(min_value=1000000000) >= 1000000000)","start_line":34,"start_c(...TRUNCATED) | [] | 1 | true | ["[CLS]_","Imp","reci","se_","assert_","[SEP]_","class_","Test","Number","Helpers_","(_","unittest_"(...TRUNCATED) | [4,4,4,4,4,4,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2(...TRUNCATED) |
Dataset Card for CodeQueries
Dataset Summary
CodeQueries is a dataset to evaluate the ability of neural networks to answer semantic queries over code. Given a query and code, a model is expected to identify answer and supporting-fact spans in the code for the query. This is extractive question-answering over code, for questions with a large scope (entire files) and complexity including both single- and multi-hop reasoning.
Supported Tasks and Leaderboards
Extractive question answering for code, semantic understanding of code.
Languages
The dataset contains code context from python files.
Dataset Structure
How to Use
The dataset can be directly used with the huggingface datasets package. You can load and iterate through the dataset for the proposed five settings with a few lines of code:
import datasets
# in addition to `twostep`, the other supported settings are <ideal/file_ideal/prefix>.
ds = datasets.load_dataset("thepurpleowl/codequeries", "twostep", split=datasets.Split.TEST)
print(next(iter(ds)))
#OUTPUT:
{'query_name': 'Unused import',
'code_file_path': 'rcbops/glance-buildpackage/glance/tests/unit/test_db.py',
'context_block': {'content': '# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010-2011 OpenStack, LLC\ ...',
'metadata': 'root',
'header': "['module', '___EOS___']",
'index': 0},
'answer_spans': [{'span': 'from glance.common import context',
'start_line': 19,
'start_column': 0,
'end_line': 19,
'end_column': 33}
],
'supporting_fact_spans': [],
'example_type': 1,
'single_hop': False,
'subtokenized_input_sequence': ['[CLS]_', 'Un', 'used_', 'import_', '[SEP]_', 'module_', '\\u\\u\\uEOS\\u\\u\\u_', '#', ' ', 'vim', ':', ...],
'label_sequence': [4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, ...],
'relevance_label': 1
}
Data Splits and Data Fields
Detailed information on the data splits for proposed settings can be found in the paper.
In general, data splits in all the proposed settings have examples with the following fields -
- query_name (query name to uniquely identify the query)
- code_file_path (relative source file path w.r.t. ETH Py150 corpus)
- context_blocks (code blocks as context with metadata) [`prefix` setting doesn't have this field and `twostep` has `context_block`]
- answer_spans (answer spans with metadata)
- supporting_fact_spans (supporting-fact spans with metadata)
- example_type (example type: 1 (positive) or 0 (negative))
- single_hop (True or False - for query type)
- subtokenized_input_sequence (example subtokens) [`prefix` setting has the corresponding token ids]
- label_sequence (example subtoken labels)
- relevance_label (0 (not relevant) or 1 (relevant) - relevance label of a block) [only `twostep` setting has this field]
Dataset Creation
The dataset is created using the ETH Py150 Open dataset as the source of code contexts. CodeQL was used to obtain the semantic queries and their corresponding answer and supporting-fact spans in the ETH Py150 Open corpus files.
Additional Information
Licensing Information
The source code repositories used for preparing CodeQueries are based on the ETH Py150 Open dataset and are redistributable under the respective licenses. A Huggingface dataset for ETH Py150 Open is available here. The labeling prepared and provided by us as part of CodeQueries is released under the Apache-2.0 license.
- Downloads last month
- 66