github-code-clean / github-code-clean.py
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GitHub Code clean dataset."""
import os
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
_REPO_NAME = "codeparrot/github-code-clean"
_LANG_TO_EXTENSION = {
"Assembly": [".asm"],
"Batchfile": [".bat", ".cmd"],
"C": [".c", ".h"],
"C#": [".cs"],
"C++": [".cpp", ".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H"],
"CMake": [".cmake"],
"CSS": [".css"],
"Dockerfile": [".dockerfile", "Dockerfile"],
"FORTRAN": ['.f90', '.f', '.f03', '.f08', '.f77', '.f95', '.for', '.fpp'],
"GO": [".go"],
"Haskell": [".hs"],
"HTML":[".html"],
"Java": [".java"],
"JavaScript": [".js"],
"Julia": [".jl"],
"Lua": [".lua"],
"Makefile": ["Makefile"],
"Markdown": [".md", ".markdown"],
"PHP": [".php", ".php3", ".php4", ".php5", ".phps", ".phpt"],
"Perl": [".pl", ".pm", ".pod", ".perl"],
"PowerShell": ['.ps1', '.psd1', '.psm1'],
"Python": [".py"],
"Ruby": [".rb"],
"Rust": [".rs"],
"SQL": [".sql"],
"Scala": [".scala"],
"Shell": [".sh", ".bash", ".command", ".zsh"],
"TypeScript": [".ts", ".tsx"],
"TeX": [".tex"],
"Visual Basic": [".vb"]
}
_LICENSES = ['mit',
'apache-2.0',
'gpl-3.0',
'gpl-2.0',
'bsd-3-clause',
'agpl-3.0',
'lgpl-3.0',
'lgpl-2.1',
'bsd-2-clause',
'cc0-1.0',
'epl-1.0',
'mpl-2.0',
'unlicense',
'isc',
'artistic-2.0']
_DESCRIPTION = """\
The GitHub Code clean dataset in a more filtered version of codeparrot/github-code dataset, it consists of 115M code files from GitHub in 32 programming \
languages with 60 extensions totaling in almost 1TB of text data.
"""
_HOMEPAGE = "https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code/"
_EXTENSION_TO_LANG = {}
for lang in _LANG_TO_EXTENSION:
for extension in _LANG_TO_EXTENSION[lang]:
_EXTENSION_TO_LANG[extension] = lang
_LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())
_LICENSE_CONFIGS = ["all"] + _LICENSES
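# e.g. _EXTENSION_TO_LANG[".py"] == "Python"; a config may request "all" or any
# subset of the languages/licenses listed above.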
class GithubCodeConfig(datasets.BuilderConfig):
"""BuilderConfig for the GitHub Code dataset."""
def __init__(self, *args, languages=["all"], licenses=["all"], max_samples=None, **kwargs):
"""BuilderConfig for the GitHub Code dataset.
Args:
languages (:obj:`List[str]`): List of languages to load.
licenses (:obj:`List[str]`): List of licenses to load.
max_samples (:obj:`int`, optional): Maximum number of samples to generate (for early stopping).
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(
*args,
name="+".join(languages)+"-"+"+".join(licenses),
**kwargs,
)
languages = set(languages)
licenses = set(licenses)
assert all([language in _LANG_CONFIGS for language in languages]), f"Language not in {_LANG_CONFIGS}."
assert all([license in _LICENSE_CONFIGS for license in licenses]), f"License not in {_LICENSE_CONFIGS}."
if "all" in languages:
assert len(languages)==1, "Passed 'all' together with other languages."
self.filter_languages = False
else:
self.filter_languages = True
if "all" in licenses:
assert len(licenses)==1, "Passed 'all' together with other licenses."
self.filter_licenses = False
else:
self.filter_licenses = True
        self.languages = languages
        self.licenses = licenses
self.max_samples = max_samples
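# Naming sketch (illustrative): GithubCodeConfig(languages=["Python", "Java"],
# licenses=["mit"]) is named "Python+Java-mit"; the unfiltered default below is
# "all-all".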
class GithubCode(datasets.GeneratorBasedBuilder):
"""GitHub Code dataset."""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIG_CLASS = GithubCodeConfig
BUILDER_CONFIGS = [GithubCodeConfig(languages=[lang], licenses=[license]) for lang in _LANG_CONFIGS
for license in _LICENSE_CONFIGS]
DEFAULT_CONFIG_NAME = "all-all"
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"code": datasets.Value("string"),
"repo_name": datasets.Value("string"),
"path": datasets.Value("string"),
"language": datasets.Value("string"),
"license": datasets.Value("string"),
"size": datasets.Value("int32")}),
supervised_keys=None,
homepage=_HOMEPAGE,
license="Multiple: see the 'license' field of each sample.",
)
def _split_generators(self, dl_manager):
num_shards = 880
data_files = [
f"data/train-{_index:05d}-of-{num_shards:05d}.parquet"
for _index in range(num_shards)
]
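        # e.g. index 0 resolves to "data/train-00000-of-00880.parquet"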
files = dl_manager.download(data_files)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"files": files,
},
),
]
def _generate_examples(self, files):
key = 0
yielded_count = 0
max_samples = self.config.max_samples
for file_idx, file in enumerate(files):
# Early stopping at file level
if max_samples is not None and yielded_count >= max_samples:
return
parquet_file = pq.ParquetFile(file)
# Process each row group separately (Parquet internal chunking)
for rg_idx in range(parquet_file.num_row_groups):
# Early stopping at row group level
if max_samples is not None and yielded_count >= max_samples:
return
# PASS 1: Read ONLY filter columns from this row group
filter_table = parquet_file.read_row_group(rg_idx, columns=['path', 'license'])
paths = filter_table['path'].to_pylist()
licenses = filter_table['license'].to_pylist()
# Find matching indices within this row group
matching_indices = []
matching_langs = []
for row_index in range(len(paths)):
if max_samples is not None and yielded_count + len(matching_indices) >= max_samples:
break
lang = lang_from_name(paths[row_index])
license = licenses[row_index]
if self.config.filter_languages and lang not in self.config.languages:
continue
if self.config.filter_licenses and license not in self.config.licenses:
continue
matching_indices.append(row_index)
matching_langs.append(lang)
# PASS 2: Read full row group ONLY if there are matches
if matching_indices:
# Now read ALL columns for this row group
full_table = parquet_file.read_row_group(rg_idx)
# Extract only matching rows
filtered_table = full_table.take(matching_indices)
batch_dict = filtered_table.to_pydict()
# Yield all matching rows
for i in range(len(matching_indices)):
yield key, {
"code": batch_dict['code'][i],
"repo_name": batch_dict['repo_name'][i],
"path": batch_dict['path'][i],
"license": batch_dict['license'][i],
"language": matching_langs[i],
"size": int(batch_dict['size'][i])
}
key += 1
yielded_count += 1
if max_samples is not None and yielded_count >= max_samples:
return
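# Two-pass read sketch (standalone and illustrative; the shard path is
# hypothetical): projecting only the filter columns first means the heavy
# `code` column is decoded only for row groups that contain matches.
#
#     pf = pq.ParquetFile("data/train-00000-of-00880.parquet")
#     meta = pf.read_row_group(0, columns=["path", "license"])
#     keep = [i for i, p in enumerate(meta["path"].to_pylist()) if p.endswith(".py")]
#     if keep:
#         rows = pf.read_row_group(0).take(keep)  # full read only when needed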
def lang_from_name(name):
    """Map a file path to its language via the extension (or the bare file name
    for extension-less entries such as Makefile); returns None if no match."""
    for extension in _EXTENSION_TO_LANG:
        if name.endswith(extension):
            return _EXTENSION_TO_LANG[extension]
    return None
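# Example behavior (derived from the extension table above):
#   lang_from_name("src/app.cpp") -> "C++"
#   lang_from_name("Makefile")    -> "Makefile"
#   lang_from_name("notes.txt")   -> None (dropped whenever a language filter is active)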