"""GitHub Code clean dataset.""" |

import pyarrow.parquet as pq

import datasets
|
|
_REPO_NAME = "codeparrot/github-code-clean"

_LANG_TO_EXTENSION = {
    "Assembly": [".asm"],
    "Batchfile": [".bat", ".cmd"],
    "C": [".c", ".h"],
    "C#": [".cs"],
    "C++": [".cpp", ".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H"],
    "CMake": [".cmake"],
    "CSS": [".css"],
    "Dockerfile": [".dockerfile", "Dockerfile"],
    "FORTRAN": [".f90", ".f", ".f03", ".f08", ".f77", ".f95", ".for", ".fpp"],
    "GO": [".go"],
    "Haskell": [".hs"],
    "HTML": [".html"],
    "Java": [".java"],
    "JavaScript": [".js"],
    "Julia": [".jl"],
    "Lua": [".lua"],
    "Makefile": ["Makefile"],
    "Markdown": [".md", ".markdown"],
    "PHP": [".php", ".php3", ".php4", ".php5", ".phps", ".phpt"],
    "Perl": [".pl", ".pm", ".pod", ".perl"],
    "PowerShell": [".ps1", ".psd1", ".psm1"],
    "Python": [".py"],
    "Ruby": [".rb"],
    "Rust": [".rs"],
    "SQL": [".sql"],
    "Scala": [".scala"],
    "Shell": [".sh", ".bash", ".command", ".zsh"],
    "TypeScript": [".ts", ".tsx"],
    "TeX": [".tex"],
    "Visual Basic": [".vb"],
}
|
|
_LICENSES = [
    "mit",
    "apache-2.0",
    "gpl-3.0",
    "gpl-2.0",
    "bsd-3-clause",
    "agpl-3.0",
    "lgpl-3.0",
    "lgpl-2.1",
    "bsd-2-clause",
    "cc0-1.0",
    "epl-1.0",
    "mpl-2.0",
    "unlicense",
    "isc",
    "artistic-2.0",
]
|
|
_DESCRIPTION = """\
The GitHub Code clean dataset is a more filtered version of the codeparrot/github-code dataset. It consists of 115M code files from GitHub \
in 32 programming languages with 60 extensions, totaling almost 1TB of text data.
"""
|
|
_HOMEPAGE = "https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code/"
|
|
# Invert the language -> extensions mapping so a file's language can be
# looked up from its path suffix.
_EXTENSION_TO_LANG = {}
for lang in _LANG_TO_EXTENSION:
    for extension in _LANG_TO_EXTENSION[lang]:
        _EXTENSION_TO_LANG[extension] = lang
|
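# For example, _EXTENSION_TO_LANG[".py"] == "Python", and the bare filename
# "Makefile" maps to "Makefile".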
|
_LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())
_LICENSE_CONFIGS = ["all"] + _LICENSES
|
|
class GithubCodeConfig(datasets.BuilderConfig):
    """BuilderConfig for the GitHub Code dataset."""

    def __init__(self, *args, languages=("all",), licenses=("all",), max_samples=None, **kwargs):
        """BuilderConfig for the GitHub Code dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            licenses (:obj:`List[str]`): List of licenses to load.
            max_samples (:obj:`int`, optional): Maximum number of samples to generate (for early stopping).
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join(languages) + "-" + "+".join(licenses),
            **kwargs,
        )

        languages = set(languages)
        licenses = set(licenses)

        assert all(language in _LANG_CONFIGS for language in languages), f"Unknown language in {languages}; valid values: {_LANG_CONFIGS}."
        assert all(license in _LICENSE_CONFIGS for license in licenses), f"Unknown license in {licenses}; valid values: {_LICENSE_CONFIGS}."

        if "all" in languages:
            assert len(languages) == 1, "Passed 'all' together with other languages."
            self.filter_languages = False
        else:
            self.filter_languages = True

        if "all" in licenses:
            assert len(licenses) == 1, "Passed 'all' together with other licenses."
            self.filter_licenses = False
        else:
            self.filter_licenses = True

        self.languages = languages
        self.licenses = licenses
        self.max_samples = max_samples
|
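# Construction sketch (hypothetical example, not part of the original script):
# config names follow the "<languages>-<licenses>" scheme built in __init__,
# with multiple values joined by "+", e.g.
#
#   config = GithubCodeConfig(languages=["Python", "Java"], licenses=["mit"])
#   assert config.name == "Python+Java-mit"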
|
class GithubCode(datasets.GeneratorBasedBuilder):
    """GitHub Code dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = GithubCodeConfig
    BUILDER_CONFIGS = [
        GithubCodeConfig(languages=[lang], licenses=[license])
        for lang in _LANG_CONFIGS
        for license in _LICENSE_CONFIGS
    ]
    DEFAULT_CONFIG_NAME = "all-all"
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "code": datasets.Value("string"),
                    "repo_name": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "size": datasets.Value("int32"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license="Multiple: see the 'license' field of each sample.",
        )
|
    def _split_generators(self, dl_manager):
        num_shards = 880
        data_files = [
            f"data/train-{_index:05d}-of-{num_shards:05d}.parquet"
            for _index in range(num_shards)
        ]
        files = dl_manager.download(data_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": files,
                },
            ),
        ]
|
    def _generate_examples(self, files):
        key = 0
        yielded_count = 0
        max_samples = self.config.max_samples

        for file in files:
            if max_samples is not None and yielded_count >= max_samples:
                return

            parquet_file = pq.ParquetFile(file)

            for rg_idx in range(parquet_file.num_row_groups):
                if max_samples is not None and yielded_count >= max_samples:
                    return

                # First read only the columns needed for filtering; the full
                # row group is materialized below only when something matches.
                filter_table = parquet_file.read_row_group(rg_idx, columns=["path", "license"])

                paths = filter_table["path"].to_pylist()
                licenses = filter_table["license"].to_pylist()

                matching_indices = []
                matching_langs = []

                for row_index in range(len(paths)):
                    if max_samples is not None and yielded_count + len(matching_indices) >= max_samples:
                        break

                    lang = lang_from_name(paths[row_index])
                    license = licenses[row_index]

                    if self.config.filter_languages and lang not in self.config.languages:
                        continue
                    if self.config.filter_licenses and license not in self.config.licenses:
                        continue

                    matching_indices.append(row_index)
                    matching_langs.append(lang)

                if matching_indices:
                    full_table = parquet_file.read_row_group(rg_idx)
                    filtered_table = full_table.take(matching_indices)
                    batch_dict = filtered_table.to_pydict()

                    for i in range(len(matching_indices)):
                        yield key, {
                            "code": batch_dict["code"][i],
                            "repo_name": batch_dict["repo_name"][i],
                            "path": batch_dict["path"][i],
                            "license": batch_dict["license"][i],
                            "language": matching_langs[i],
                            "size": int(batch_dict["size"][i]),
                        }
                        key += 1
                        yielded_count += 1

                        if max_samples is not None and yielded_count >= max_samples:
                            return
|
|
def lang_from_name(name):
    """Return the language for a file path based on its extension, or None."""
    for extension in _EXTENSION_TO_LANG:
        if name.endswith(extension):
            return _EXTENSION_TO_LANG[extension]
    return None
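

# Usage sketch (a minimal example, assuming the Hugging Face `datasets` library
# and access to the Hub; `languages`, `licenses`, and `max_samples` are config
# kwargs forwarded to GithubCodeConfig above):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("codeparrot/github-code-clean",
#                     languages=["Python"], licenses=["mit"],
#                     split="train", streaming=True)
#   print(next(iter(ds))["license"])  # -> "mit"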