# isonetpp_loader.py
from __future__ import annotations

import os
from typing import Optional, Dict

from huggingface_hub import hf_hub_download

from subiso_dataset import (
    SubgraphIsomorphismDataset,
    # TRAIN_MODE, VAL_MODE, TEST_MODE, BROAD_TEST_MODE
    TRAIN_MODE,
    VAL_MODE,
    TEST_MODE,
)


# ----------------------------
# Helpers
# ----------------------------
def _pairs_for_size(dataset_size: str) -> str:
    return "80k" if dataset_size == "small" else "240k"


def _folder_for_size(dataset_size: str) -> str:
    return "small_dataset" if dataset_size == "small" else "large_dataset"


def _normalize_name(base_name: str, dataset_size: str) -> str:
    """
    Accepts 'aids' or 'aids240k' (and similarly for the other sets).
    If the name is bare, append the pair count; if it already ends in
    80k/240k, keep it as-is.
    """
    pairs = _pairs_for_size(dataset_size)
    if base_name.endswith(("80k", "240k")):
        return base_name
    return f"{base_name}{pairs}"


def _mode_prefix_and_dir(mode: str) -> tuple[str, str]:
    """
    The file prefix uses 'test' whenever the mode contains 'test' (repo
    convention). The directory layout only has train/val/test, so
    Extra_test_300 maps to 'test'.
    """
    prefix = "test" if "test" in mode.lower() else mode
    mode_dir = "test" if "test" in mode.lower() else mode
    return prefix, mode_dir


# ----------------------------
# Path resolution + downloads
# ----------------------------
def _ensure_paths(
    repo_id: str,
    mode: str,
    dataset_name: str,   # 'aids' or 'aids240k'
    dataset_size: str,   # 'small' | 'large'
    local_root: Optional[str] = None,
) -> Dict[str, str]:
    """
    Download the three files needed into the cache (or local_root if set):
      - <folder>/splits/<mode>/<mode>_<base>_query_subgraphs.pkl
      - <folder>/splits/<mode>/<mode>_<base>_rel_nx_is_subgraph_iso.pkl
      - <folder>/corpus/<base>_corpus_subgraphs.pkl
    where <folder> is "small_dataset"/"large_dataset" and <base> is the
    normalized name (contains 80k/240k exactly once).
    """
    folder = _folder_for_size(dataset_size)             # "large_dataset" or "small_dataset"
    base = _normalize_name(dataset_name, dataset_size)  # e.g., "aids240k"
    # prefix, mode_dir = _mode_prefix_and_dir(mode)

    query_fname = f"{mode}_{base}_query_subgraphs.pkl"
    rel_fname = f"{mode}_{base}_rel_nx_is_subgraph_iso.pkl"
    corpus_fname = f"{base}_corpus_subgraphs.pkl"

    repo_query_path = f"{folder}/splits/{mode}/{query_fname}"
    repo_rel_path = f"{folder}/splits/{mode}/{rel_fname}"
    repo_corpus_path = f"{folder}/corpus/{corpus_fname}"

    kwargs = dict(
        repo_id=repo_id,
        repo_type="dataset",
        local_dir=local_root,
        local_dir_use_symlinks=False,
    )
    query_path = hf_hub_download(filename=repo_query_path, **kwargs)
    rel_path = hf_hub_download(filename=repo_rel_path, **kwargs)
    corpus_path = hf_hub_download(filename=repo_corpus_path, **kwargs)

    return {"query": query_path, "rel": rel_path, "corpus": corpus_path}
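
# Worked example of the path construction above (illustrative only; nothing
# here touches the hub, and it assumes TEST_MODE == "test" in subiso_dataset):
#   dataset_name="aids", dataset_size="large", mode="test" resolves to
#     large_dataset/splits/test/test_aids240k_query_subgraphs.pkl
#     large_dataset/splits/test/test_aids240k_rel_nx_is_subgraph_iso.pkl
#     large_dataset/corpus/aids240k_corpus_subgraphs.pkl
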
# ----------------------------
# Public entrypoint
# ----------------------------
def load_isonetpp_benchmark(
    repo_id: str = "structlearning/isonetpp-benchmark",
    mode: str = "train",          # "train" | "val" | "test" | "Extra_test_300"
    dataset_name: str = "aids",   # "aids" or "aids240k" (same for mutag/ptc_*)
    dataset_size: str = "large",  # "small" | "large"
    batch_size: int = 128,
    data_type: str = "gmn",       # "pyg" or "gmn"
    device: Optional[str] = None,
    download_root: Optional[str] = None,
):
    # Map to class constants
    mode_map = {
        "train": TRAIN_MODE,
        "val": VAL_MODE,
        "test": TEST_MODE,
        # "extra_test_300": BROAD_TEST_MODE,
        # "Extra_test_300": BROAD_TEST_MODE,
    }
    mode_norm = mode_map.get(mode, mode)

    paths = _ensure_paths(
        repo_id=repo_id,
        mode=mode_norm,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        local_root=download_root,
    )

    # paths["query"] = .../<folder>/splits/<mode>/<file>
    # We want dataset_base_path to be the **parent of <folder>** so that
    #   dataset_base_path / dataset_path_override / splits/<mode>/...
    # exists. Compute the levels carefully:
    #   file_dir   = .../<folder>/splits/<mode>
    #   splits_dir = .../<folder>/splits
    #   folder_dir = .../<folder>
    #   parent_dir = parent of <folder>
    file_dir = os.path.dirname(paths["query"])
    splits_dir = os.path.dirname(file_dir)
    folder_dir = os.path.dirname(splits_dir)
    parent_dir = os.path.dirname(folder_dir)  # <-- this is the correct dataset_base_path

    dataset_config = dict(
        mode=mode_norm,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        batch_size=batch_size,
        data_type=data_type,
        dataset_base_path=parent_dir,  # parent of <folder>
        dataset_path_override=None,    # or _folder_for_size(dataset_size): "large_dataset"/"small_dataset"
        experiment=None,
        device=device,
    )
    return SubgraphIsomorphismDataset(**dataset_config)
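

# ----------------------------
# Usage sketch
# ----------------------------
# A minimal, hedged example: it assumes network access to the Hugging Face Hub
# and that `subiso_dataset` is importable. Only construction is demonstrated;
# batching and iteration follow SubgraphIsomorphismDataset's own API.
if __name__ == "__main__":
    dataset = load_isonetpp_benchmark(
        mode="test",
        dataset_name="aids",
        dataset_size="large",
        batch_size=64,
        data_type="gmn",
        device="cpu",
    )
    print(f"Loaded {type(dataset).__name__} for aids (large, test split)")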