Fix: Collect all data first then write parquet with unified schema
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "pandas==2.2.2",
#     "aiohttp",
#     "python-dotenv==1.0.1",
#     "huggingface-hub==0.24.3",
#     "tenacity==9.0.0",
#     "pyarrow==17.0.0",
#     "requests",
# ]
# ///
import json
import os
import asyncio
import time
import pandas as pd
import aiohttp
import requests.utils
from dotenv import load_dotenv
from huggingface_hub import HfApi
from tenacity import retry, stop_after_attempt, wait_exponential
import pyarrow as pa
import pyarrow.parquet as pq

load_dotenv()

CACHE_DIR = ".hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)

# Get token from environment (works in HF Jobs)
HF_TOKEN = os.environ.get("HF_TOKEN")
api = HfApi(token=HF_TOKEN)
USER_ID = api.whoami()["name"]
REPO_ID = f"{USER_ID}/hub-stats"

| print(f"π Hugging Face Hub Stats Collector") | |
| print(f"π Dataset will be uploaded to: {REPO_ID}") | |
| print(f"π€ User: {USER_ID}") | |
| print("-" * 50) | |
ENDPOINT_CONFIGS = {
    "models": {
        "limit": 1000,
        "params": {
            "full": "true",
            "config": "true",
            "expand[]": [
                "gguf",
                "downloadsAllTime",
                "transformersInfo",
                "cardData",
                "safetensors",
                "baseModels",
                "author",
                "likes",
                "inferenceProviderMapping",
                "downloads",
                "siblings",
                "tags",
                "pipeline_tag",
                "lastModified",
                "createdAt",
                "config",
                "library_name",
            ],
        },
    },
    "datasets": {
        "limit": 1000,
        "params": {
            "full": "true",
            "expand[]": [
                "author",
                "cardData",
                "citation",
                "createdAt",
                "disabled",
                "description",
                "downloads",
                "downloadsAllTime",
                "gated",
                "lastModified",
                "likes",
                "paperswithcode_id",
                "private",
                "siblings",
                "sha",
                "tags",
                "trendingScore",
            ],
        },
    },
    "spaces": {"limit": 1000, "params": {"full": "true"}},
    "posts": {"limit": 50, "params": {"skip": 0}},
    "daily_papers": {
        "limit": 50,
        "params": {},
        "base_url": "https://huggingface.co/api/daily_papers",
    },
}

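
# The Hub listing APIs paginate with an RFC 5988 "Link" response header; an
# illustrative (not guaranteed) value looks like:
#   <https://huggingface.co/api/models?cursor=...&limit=1000>; rel="next"
# parse_link_header() pulls out the rel="next" URL, or returns None on the last page.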
def parse_link_header(link_header):
    if not link_header:
        return None
    links = requests.utils.parse_header_links(link_header)
    for link in links:
        if link.get("rel") == "next":
            return link.get("url")
    return None


def to_json_string(x):
    return (
        json.dumps(x)
        if isinstance(x, (dict, list))
        else str(x) if x is not None else None
    )

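
# Flatten one page of API results into Parquet-friendly columns, per endpoint:
#   - posts: promote nested author fields to top-level columns, drop timezone info
#   - daily_papers: expand the nested "paper" object into paper_* columns
#   - models/datasets/spaces: parse timestamps, stringify "gated", and JSON-encode
#     nested dict/list columns (cardData, config, gguf) so the schema stays flat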
def process_dataframe(df, endpoint):
    if len(df) == 0:
        return df
    if endpoint == "posts":
        if "author" in df.columns:
            author_df = pd.json_normalize(df["author"])
            author_cols = ["avatarUrl", "followerCount", "fullname", "name"]
            for col in author_cols:
                if col in author_df.columns:
                    df[col] = author_df[col]
            df = df.drop("author", axis=1)
        for ts_col in ["publishedAt", "updatedAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col]).dt.tz_localize(None)
    elif endpoint == "daily_papers":
        if "paper" in df.columns:
            paper_df = pd.json_normalize(df["paper"], errors="ignore").add_prefix(
                "paper_"
            )
            df = pd.concat([df.drop("paper", axis=1), paper_df], axis=1)
        for ts_col in ["publishedAt", "paper_publishedAt"]:
            if ts_col in df.columns:
                df[ts_col] = pd.to_datetime(df[ts_col], errors="coerce").dt.tz_localize(
                    None
                )
    else:
        for field in ["createdAt", "lastModified"]:
            if field in df.columns:
                df[field] = pd.to_datetime(df[field], errors="coerce").dt.tz_localize(
                    None
                )
        if "gated" in df.columns:
            df["gated"] = df["gated"].astype(str)
        for col in ["cardData", "config", "gguf"]:
            if col in df.columns:
                df[col] = df[col].apply(to_json_string)
    return df

def save_parquet(df, output_file):
    df.to_parquet(output_file, index=False, engine="pyarrow")


async def fetch_data_page(session, url, params=None, headers=None):
    async with session.get(url, params=params, headers=headers) as response:
        response.raise_for_status()
        return await response.json(), response.headers.get("Link")

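
# Convert one cached JSONL file (one raw API page per line) into a single Parquet
# file. All pages are loaded and concatenated before writing, so pandas/pyarrow see
# every column that appears on any page and settle on one unified schema; writing
# page-by-page with a fixed-schema ParquetWriter can break when a later page
# introduces a field the first page did not have (presumably the motivation for
# this change).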
def jsonl_to_parquet(endpoint, jsonl_file, output_file):
    if not os.path.exists(jsonl_file):
        print(f"❌ {jsonl_file} not found")
        return 0
    # Collect all dataframes first to get unified schema
    all_dfs = []
    with open(jsonl_file, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            data = json.loads(line)
            if endpoint == "posts":
                items = data.get("socialPosts", [])
            else:
                items = data
            if not items:
                continue
            df = pd.DataFrame(items)
            if df.empty:
                continue
            df = process_dataframe(df, endpoint)
            all_dfs.append(df)
    if not all_dfs:
        print(f" No data found for {endpoint}")
        return 0
    # Concatenate all dataframes - pandas will unify types
    combined_df = pd.concat(all_dfs, ignore_index=True)
    total_rows = len(combined_df)
    # Write to parquet
    combined_df.to_parquet(output_file, index=False, engine="pyarrow")
    return total_rows

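
# Two-phase collection: (1) page through each endpoint and append every raw JSON
# response to a per-endpoint JSONL cache file, then (2) convert each cache file to
# Parquet and optionally upload it to the dataset repo. Keeping the raw JSONL means
# the Parquet files can be rebuilt later without re-fetching (see --recreate below).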
async def create_parquet_files(skip_upload=False):
    start_time = time.time()
    endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]
    created_files = []
    jsonl_files = {}
    async with aiohttp.ClientSession() as session:
        for endpoint in endpoints:
            print(f"Fetching {endpoint}...")
            config = ENDPOINT_CONFIGS[endpoint]
            base_url = config.get("base_url", f"https://huggingface.co/api/{endpoint}")
            params = {"limit": config["limit"]}
            params.update(config["params"])
            headers = {"Accept": "application/json"}
            url = base_url
            page = 0
            jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
            with open(jsonl_file, "w") as f:
                pass  # truncate
            while url:
                if endpoint == "posts":
                    params["skip"] = page * params["limit"]
                try:
                    data, link_header = await fetch_data_page(
                        session, url, params, headers
                    )
                    with open(jsonl_file, "a") as f:
                        f.write(json.dumps(data) + "\n")
                    if endpoint == "posts":
                        total_items = data.get("numTotalItems", 0)
                        items_on_page = len(data.get("socialPosts", []))
                        if (page + 1) * params["limit"] >= total_items or items_on_page == 0:
                            url = None
                        else:
                            url = base_url
                    else:
                        url = parse_link_header(link_header)
                        if url:
                            params = {}
                    page += 1
                except Exception as e:
                    print(f"Error on page {page} for {endpoint}: {e}")
                    await asyncio.sleep(2)
                    if page > 0:
                        url = None
                    else:
                        raise
            print(f" Raw data for {endpoint} saved to {jsonl_file}")
            jsonl_files[endpoint] = jsonl_file
    # Convert JSONL -> Parquet with a single unified schema per endpoint
    for endpoint in endpoints:
        jsonl_file = jsonl_files.get(endpoint)
        if not jsonl_file or not os.path.exists(jsonl_file):
            continue
        print(f"Processing {endpoint} from JSONL...")
        output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
        total_rows = jsonl_to_parquet(endpoint, jsonl_file, output_file)
        print(f"✅ {endpoint}: {total_rows:,} rows -> {output_file}")
        created_files.append(output_file)
        if not skip_upload:
            upload_to_hub(output_file, REPO_ID)
    elapsed = time.time() - start_time
    return created_files, elapsed

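
# Rebuild the Parquet files from the cached *_raw.jsonl files without hitting the API.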
def recreate_from_jsonl():
    endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]
    for endpoint in endpoints:
        jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
        if not os.path.exists(jsonl_file):
            print(f"❌ {jsonl_file} not found")
            continue
        print(f"Recreating {endpoint} from {jsonl_file}...")
        output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
        total_rows = jsonl_to_parquet(endpoint, jsonl_file, output_file)
        print(f"✅ {endpoint}: {total_rows:,} rows -> {output_file}")

def upload_to_hub(file_path, repo_id):
    try:
        api.upload_file(
            path_or_fileobj=file_path,
            path_in_repo=os.path.basename(file_path),
            repo_id=repo_id,
            repo_type="dataset",
        )
        print(f"✅ Uploaded {os.path.basename(file_path)} to {repo_id}")
        return True
    except Exception as e:
        print(f"❌ Failed to upload {os.path.basename(file_path)}: {e}")
        return False

def main(skip_upload=False):
    created_files, elapsed = asyncio.run(create_parquet_files(skip_upload=skip_upload))
    print(f"\nCompleted in {elapsed:.2f} seconds")
    print(f"Created {len(created_files)} parquet files:")
    for file in created_files:
        size = os.path.getsize(file)
        pf = pq.ParquetFile(file)
        rows = pf.metadata.num_rows
        print(f" {os.path.basename(file)}: {rows:,} rows, {size:,} bytes")
    if skip_upload:
        print(f"\nRaw JSONL files saved to {CACHE_DIR}/ for recreation")
        print("Use 'python app.py --recreate' to recreate parquet files from JSONL")

if __name__ == "__main__":
    import sys

    if "--recreate" in sys.argv:
        recreate_from_jsonl()
    else:
        skip_upload = "--skip-upload" in sys.argv
        main(skip_upload=skip_upload)