Datasets:
code stringlengths 1 25.8M | language stringclasses 18
values | source stringclasses 4
values | repo stringclasses 78
values | path stringlengths 0 268 |
|---|---|---|---|---|
#include <c10/util/Exception.h>
#include <utility>
namespace at {
/*
[collapse dims] Updates sizes, and strides to reflect a "collapse" of
the info, possibly excluding the optional excludeDim. A "collapsed" version
of the info is the fewest dims that order the tensor's elements in the same
way as the original info. I... | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/CollapseDims.h |
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or a... | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/kernels/segment_reduction_ops_gpu.cu.h |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing de... | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- python-indent-offset: 4; -*-
import pandas as pd
import numpy as np
import sys
import os
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.contribution_builder as cb
from ... | unknown | codeparrot/codeparrot-clean | ||
'''
Created on Jul 22, 2017
@author: matija
'''
from object_oriented.base import Sequence as seq
from collections import Iterable
#C-2.25 impl
class Vector:
def __init__(self, d):
if isinstance(d, int):
self._coords = [0] * d
else:
try:
self._coords = [x f... | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.model.doc import addchild
class DocType:
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = docl... | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (c) 2018 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitousage;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.... | java | github | https://github.com/mockito/mockito | mockito-extensions/mockito-junit-jupiter/src/test/java/org/mockitousage/MultiLevelNestedTest.java |
from flask import Blueprint, render_template, redirect, url_for, current_app
from deployer import database as db
from deployer.routing.models import Route
from deployer.utils import xhr_form, allow_traffic, get_container_ip
from . import models, forms
views = Blueprint('apps', __name__, template_folder='templates')
... | unknown | codeparrot/codeparrot-clean | ||
# (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later ... | unknown | codeparrot/codeparrot-clean | ||
#/*******************************************************************************
# * Copyright (C) 2021 Zhu Research Group @ Rutgers-Newark
# * All rights reserved.
# *
# * This file is part of fplib.
# *
# * Permission is hereby granted, free of charge, to any person obtaining a copy
# * of this software and associat... | unknown | codeparrot/codeparrot-clean | ||
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable l... | unknown | codeparrot/codeparrot-clean | ||
//
// Code generated by grafana-app-sdk. DO NOT EDIT.
//
package v1beta1
import (
"fmt"
"github.com/grafana/grafana-app-sdk/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"time"
)
// +k8s:openap... | go | github | https://github.com/grafana/grafana | apps/folder/pkg/apis/folder/v1beta1/folder_object_gen.go |
# -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import os
from ansible.module_utils.urls import Request, op... | unknown | codeparrot/codeparrot-clean | ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use Symfony\Bundle\FrameworkBundle\FrameworkBundle;
use Symfony\Bundle\SecurityBun... | php | github | https://github.com/symfony/symfony | src/Symfony/Bundle/SecurityBundle/Tests/Functional/app/RememberMe/bundles.php |
# -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:... | unknown | codeparrot/codeparrot-clean | ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v0alpha1",
"metadata": {
"name": "v40.refresh_empty_string.v42"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
... | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v40.refresh_empty_string.v42.v0alpha1.json |
Multilingual Code Training Dataset
321K code samples. 18 programming languages. Curated from GitHub + HuggingFace.
High-quality training data for code generation models. Zero cloud budget.
|
Curated for quality. |
Production-ready. |
Yuuki Dataset is a carefully curated collection of high-quality source code from open-source repositories and public datasets. Created specifically for training the Yuuki language models, this dataset represents real-world code across 18 programming languages, with emphasis on Python, C/C++, JavaScript, and other widely-used languages.
The dataset was assembled with zero cloud budget using streaming collection from HuggingFace Datasets and targeted cloning from popular GitHub repositories. Each sample includes the source code, detected programming language, origin source, and (where applicable) repository URL and file path.
Built with rigorous deduplication, quality filtering, and language balancing to ensure diverse, high-quality training data. All code is from permissive open-source licenses (MIT, Apache-2.0, BSD, GPL, etc.) with full attribution metadata preserved.
Dataset Summary
- Total Samples: 321,200
- Languages: 18 (Python, C, C++, JavaScript, Java, Go, Rust, and more)
- Sources: GitHub repositories + HuggingFace Datasets
- License: GPLv3
- Format: JSONL (JSON Lines)
- Splits: Train (257k) / Validation (32.1k) / Test (32.1k)
- Use Case: Training code generation and completion models
**Code Generation** — Generate complete functions, classes, or modules from natural language descriptions or partial code contexts. The dataset's diverse language coverage and real-world code patterns make it ideal for training models to produce syntactically correct and idiomatic code.

**Code Completion** — Autocomplete code as developers type. Train models to predict the next tokens, lines, or blocks based on surrounding context. Includes common patterns, API usage, and language-specific idioms.

**Program Synthesis** — Learn to translate specifications, comments, or natural language into executable code. The dataset's wide range of programming paradigms (imperative, functional, object-oriented) supports robust synthesis capabilities.

**Code Translation** — Cross-language translation tasks. With 18 languages represented, models can learn to convert code from one language to another while preserving functionality and idioms.
Data Instances
Each instance in the dataset is a JSON object with the following structure:
{
"code": "def fibonacci(n):\n if n <= 1:\n return n\n return fibonacci(n-1) + fibonacci(n-2)",
"language": "python",
"source": "github",
"repo": "https://github.com/pytorch/pytorch",
"path": "examples/recursion/fibonacci.py"
}
Data Fields
| Field | Type | Description |
|---|---|---|
code |
string |
The source code content (1-25.8M characters) |
language |
string |
Detected programming language (18 values) |
source |
string |
Origin source: github, hf:dataset-name, etc. (4 unique sources) |
repo |
string |
Repository URL if sourced from GitHub (78 unique repos) |
path |
string |
Original file path within repository (0-268 characters) |
Data Splits
| Split | Samples | Percentage | Size (approx) |
|---|---|---|---|
| Train | 257,000 | 80% | ~15 GB |
| Validation | 32,100 | 10% | ~2 GB |
| Test | 32,100 | 10% | ~2 GB |
| Total | 321,200 | 100% | ~19 GB |
Splits are randomized and stratified to maintain language distribution consistency across train/validation/test sets.
The dataset covers 18 programming languages with varying representation:
| Language | Category | Primary Use Cases |
|---|---|---|
| C | Systems | Operating systems, embedded systems, performance-critical code |
| C++ | Systems | Game engines, high-performance computing, systems software |
| Python | General-purpose | Data science, web development, automation, AI/ML |
| JavaScript | Web | Frontend development, Node.js backends, full-stack applications |
| TypeScript | Web | Type-safe JavaScript for large-scale applications |
| Java | Enterprise | Android development, enterprise backends, distributed systems |
| Go | Cloud-native | Microservices, cloud infrastructure, concurrent systems |
| Rust | Systems | Memory-safe systems programming, WebAssembly, tooling |
| PHP | Web | WordPress, Laravel, server-side web development |
| Ruby | Web | Rails applications, scripting, web backends |
| Swift | Mobile | iOS, macOS, watchOS, tvOS application development |
| Kotlin | Mobile | Android development, server-side applications |
| HTML | Markup | Web page structure and content |
| CSS | Styling | Web page styling and layout |
| SQL | Database | Database queries, schema definitions, data manipulation |
| Shell | Scripting | Bash, Zsh, shell automation scripts |
| JSON | Data | Configuration files, API responses, data interchange |
| YAML | Configuration | Config files, CI/CD pipelines, infrastructure as code |
Curation Rationale
This dataset was created to train the Yuuki code generation models on resource-constrained hardware (specifically, a Snapdragon 685 smartphone) with zero cloud budget. The curation process prioritized:
- Quality over quantity — Aggressive filtering for syntax correctness, readability, and real-world patterns
- Language diversity — Balanced representation across major programming languages
- License compliance — Only permissive open-source licenses with full attribution
- Deduplication — Advanced MinHash LSH for near-duplicate detection (80% similarity threshold)
- Reproducibility — All sources documented with repository URLs and file paths
Source Data
HuggingFace Datasets
- `bigcode/the-stack-dedup` — Deduplicated subset of The Stack
- `bigcode/starcoderdata` — StarCoder training corpus
- `code_search_net` — CodeSearchNet dataset (all languages)
- `codeparrot/github-code` — GitHub code samples
- Additional curated code datasets
GitHub Repositories
78 popular open-source repositories were cloned and filtered:
- Python: Django, Flask, NumPy, Pandas, PyTorch, TensorFlow, scikit-learn
- JavaScript/TypeScript: React, Vue, Angular, Next.js, Node.js, Express
- Systems: Linux kernel, PostgreSQL, Redis, Nginx, curl, Git
- Languages: Rust, Go, Kotlin, Swift language implementations
- Frameworks: Spring Boot, Laravel, Rails, and more
Full repository list available in dataset metadata.
Data Collection
- Streaming Collection from HuggingFace Datasets (target: ~10GB)
- GitHub Cloning with shallow clones (depth=1) for efficiency
- File Extraction filtering by extension (.py, .js, .c, .cpp, etc.)
- Language Detection based on file extension and content analysis
- Quality Filtering removing minified, generated, and binary files
- Deduplication using SHA-256 exact matching + MinHash LSH (80% threshold)
- Balancing to prevent language dominance (no single language >20%)
- Splitting into 80/10/10 train/validation/test sets
Preprocessing
- Normalization: Line endings converted to `\n`; trailing whitespace removed
- Validation: Length checks (50–50,000 characters), line length heuristics
- Exclusion: Binary files, minified code, generated files (e.g., `_pb2.py`, `.min.js`)
- Pattern Filtering: Removed `node_modules`, `vendor`, `__pycache__`, and build artifacts
Social Impact
Democratizes access to high-quality code training data for researchers and developers without access to expensive compute resources or proprietary datasets. Enables training competitive code models on consumer hardware.
Discussion of Biases
**Language Bias** — Overrepresentation of popular languages (Python, JavaScript, C/C++). Underrepresentation of niche or domain-specific languages (Fortran, COBOL, R).

**Domain Bias** — Web development and data science code is overrepresented compared to embedded systems, scientific computing, or enterprise applications.

**Cultural Bias** — English-centric variable names, comments, and documentation. Code from Western/US developers may dominate due to GitHub's demographics.

**Recency Bias** — Modern coding patterns favored. Legacy code, deprecated APIs, and historical programming styles underrepresented.
Other Known Limitations
- Snapshot in time: Dataset reflects code patterns from early 2026
- Quality variance: Some low-quality or educational code may remain despite filtering
- License diversity: Mix of licenses (MIT, Apache, GPL, BSD); users must verify compatibility for commercial use
- Incomplete attribution: Some samples from aggregated datasets may lack complete provenance
Load with HuggingFace Datasets
"""Example: load the full Yuuki dataset and inspect its samples."""
from datasets import load_dataset

# Fetch every split of the dataset from the Hub.
dataset = load_dataset("OpceanAI/Yuuki-dataset")

# Bind the three standard splits to local names.
train_data = dataset["train"]
val_data = dataset["validation"]
test_data = dataset["test"]

# Walk the training split and print a short preview of each sample.
for sample in train_data:
    language = sample["language"]
    code = sample["code"]
    print(f"Language: {language}")
    print(f"Code: {code[:100]}...")  # show only the first 100 characters
Load with Pandas
"""Example: load one split of the Yuuki dataset directly with pandas."""
import pandas as pd

# Load a specific split. The hosted files are Parquet (binary columnar
# format), so use read_parquet — read_json would fail on this file.
df = pd.read_parquet("hf://datasets/OpceanAI/Yuuki-dataset/train-00000-of-00001.parquet")

# Filter by language
python_code = df[df["language"] == "python"]

# Group by source and count samples per origin
by_source = df.groupby("source").size()
print(by_source)
Filter by Language
"""Example: select samples for particular languages from the train split."""
from datasets import load_dataset

dataset = load_dataset("OpceanAI/Yuuki-dataset", split="train")

# Keep only the Python samples.
python_samples = dataset.filter(lambda sample: sample["language"] == "python")

# Keep JavaScript and TypeScript samples in a single pass.
js_samples = dataset.filter(lambda sample: sample["language"] in ["javascript", "typescript"])
If you use this dataset in your research or projects, please cite:
@misc{yuuki-dataset-2026,
author = {agua_omg},
title = {Yuuki Code Dataset: Multilingual Code Training Data},
year = {2026},
publisher = {HuggingFace},
howpublished = {\url{https://huggingface.co/datasets/OpceanAI/Yuuki-dataset}},
doi = {10.57967/hf/7809}
}
| Project | Description |
|---|---|
| Yuuki Models | Code generation models trained on this dataset |
| Yuuki API | Inference API for Yuuki models |
| Yuuki Chat | Web chat interface for Yuuki models |
| yuy CLI | Command-line tool for running Yuuki models |
| yuy-chat | Terminal UI chat interface |
| Yuuki Web | Official landing page |
Copyright (C) 2026 YuuKi-OS
This project is licensed under the GNU General Public License v3.0 (GPL-3.0). See the LICENSE file for the full text.
Individual Code Licenses
While the dataset itself is released under GPLv3 License, individual code samples within the dataset retain their original licenses. When using code from this dataset:
- Verify the license of the source repository before commercial use
- Respect original attributions and copyright notices
- Common licenses in dataset: MIT, Apache-2.0, BSD-2-Clause, BSD-3-Clause, GPL-2.0, GPL-3.0, LGPL
See the repo and source fields in each sample for license information of the original source.
This dataset builds upon the incredible work of:
- BigCode — The Stack and StarCoder datasets
- GitHub — Open-source repository hosting
- HuggingFace — Dataset hosting and infrastructure
- All open-source contributors whose code is included in this dataset
Special thanks to the maintainers of the 78 repositories included in this collection.
- Downloads last month
- 98