import streamlit as st from typing import List, Dict, Any, TypedDict, Optional, Tuple from dataclasses import dataclass from datetime import datetime import json import ipaddress import os try: import requests except Exception: requests = None # Optional libraries try: from duckduckgo_search import DDGS except Exception: DDGS = None try: from PyPDF2 import PdfReader except Exception: PdfReader = None try: import docx except Exception: docx = None try: import olefile except Exception: olefile = None try: from mutagen import File as MutagenFile except Exception: MutagenFile = None try: from rapidfuzz import fuzz except Exception: fuzz = None try: import exifread except Exception: exifread = None try: import networkx as nx except Exception: nx = None try: from pyvis.network import Network except Exception: Network = None try: from sentence_transformers import SentenceTransformer except Exception: SentenceTransformer = None try: from jinja2 import Template except Exception: Template = None # --------------------------- # Config & Styles # --------------------------- st.set_page_config(page_title="OSINT Investigator", layout="wide") HIDE_STREAMLIT_STYLE = """ """ st.markdown(HIDE_STREAMLIT_STYLE, unsafe_allow_html=True) st.markdown("""
""", unsafe_allow_html=True) if st.session_state.get("settings", {}).get("light_mode"): st.markdown("""""", unsafe_allow_html=True) else: st.markdown("""""", unsafe_allow_html=True) # --------------------------- # Sidebar: Settings # --------------------------- def _get_settings() -> Dict[str, Any]: with st.sidebar: st.header("Settings") model = st.selectbox( "Advisor model (CPU-friendly)", [ "qwen2.5-1.5b-instruct", "phi-3-mini-4k-instruct", "gemma-2-2b-it", ], index=0, key="advisor_model_select", help="Choose which free local LLM to use for advisor suggestions." ) max_per = st.slider( "Default max results per dork", min_value=3, max_value=50, value=10, step=1, key="default_max_results", help="Used as the default when executing dorks in Step 4." ) logging = st.checkbox( "Enable audit logging", value=True, key="enable_audit_logging", help="If off, actions won't be written to the audit trail." ) use_embeddings = st.checkbox( "Enable semantic similarity (embeddings)", value=False, key="enable_embeddings", help="Loads a small sentence-transformer to boost scoring by context relevance." ) light_mode = st.checkbox( "Light mode UI override", value=False, key="light_mode_toggle", help="Apply a lighter palette without reloading base theme" ) return {"model": model, "max_per": max_per, "logging": logging, "light_mode": light_mode} SETTINGS = _get_settings() st.session_state["settings"] = SETTINGS st.session_state.setdefault("_embed_model", None) # --------------------------- # Google Dorks (typed catalog for many entities) # --------------------------- class TypedDork(TypedDict): q: str type: str why: str # Dork category glossary (shown in explainer) DORK_TYPES: Dict[str, str] = { "Footprinting": "Map surface area: sites/subdomains, logins, admin panels, basic presence.", "Directory/Index": "Hunt for open listings or auto-generated indexes exposing files.", "Docs/Collab": "Live docs/boards accidentally exposed (docs.google, Trello, etc.).", "Code/Repo": "Public repos that may contain references, issues, or credentials.", "Credentials/Secrets": "Clues that hint at passwords/keys or places leaks may exist.", "Exposure/Leak": "Mentions of breaches, leaks, or dumps involving the entity.", "People/Profiles": "Official bios, resumes/CVs, speaker pages, researcher profiles.", "Social Activity": "Usernames/handles across social and developer communities.", "Regulatory/Legal": "Filings and official records (e.g., SEC/EDGAR).", "Incidents/Risk": "Incident reports, outages, protests, negative events.", "Academic/Research": "Scholarly/technical works tied to a name or org.", } # ---- Typed dork builders ---- def typed_dorks_for_email(email: str) -> List[TypedDork]: user, dom = (email.split("@", 1) + [""])[:2] return [ {"q": f'"{email}"', "type": "Footprinting", "why": "Exact email mentions across the web."}, {"q": f'intext:"{email}"', "type": "Footprinting", "why": "Mentions inside page bodies."}, {"q": f'intext:"{user}" intext:"{dom}"', "type": "Footprinting", "why": "Mentions with split user/domain."}, {"q": f'site:{dom} intext:"@{dom}"', "type": "Footprinting", "why": "Emails published on the same domain."}, {"q": f'"{email}" filetype:pdf OR filetype:doc OR filetype:xls OR filetype:csv', "type": "Docs/Collab", "why": "Docs that may expose PII/roles."}, {"q": f'"{email}" site:github.com OR site:gitlab.com', "type": "Code/Repo", "why": "Commits/issues referencing the email."}, {"q": f'"{email}" site:gravatar.com', "type": "People/Profiles", "why": "Avatar/profile tied to the email hash."}, {"q": f'"{email}" site:pastebin.com OR site:ghostbin.com OR site:hastebin.com', "type": "Exposure/Leak", "why": "Common paste sites for leaks."}, {"q": f'"{email}" inurl:wp- OR inurl:wp-content OR inurl:wp-config', "type": "Directory/Index", "why": "WordPress artifacts sometimes leak emails."}, {"q": f'"{email}" AROUND(3) "password"', "type": "Credentials/Secrets", "why": "Heuristic for password-adjacent mentions."}, ] def typed_dorks_for_domain(d: str) -> List[TypedDork]: return [ {"q": f"site:{d} -www", "type": "Footprinting", "why": "Apex domain excluding www."}, {"q": f"site:*.{d} -www", "type": "Footprinting", "why": "Enumerate subdomains exposed to crawlers."}, {"q": f'"@{d}"', "type": "Footprinting", "why": "Emails belonging to the domain across the web."}, {"q": f'site:linkedin.com "{d}"', "type": "People/Profiles", "why": "Employees listing org domain."}, {"q": f'site:github.com "{d}"', "type": "Code/Repo", "why": "Repositories/issues referencing the domain."}, {"q": f'site:gitlab.com "{d}"', "type": "Code/Repo", "why": "Alternate forge often used by teams."}, {"q": f'site:docs.google.com "{d}"', "type": "Docs/Collab", "why": "Potentially exposed Google Docs/Sheets/Slides."}, {"q": f'site:trello.com "{d}"', "type": "Docs/Collab", "why": "Public Trello boards occasionally misconfigured."}, {"q": f'"{d}" filetype:pdf OR filetype:doc OR filetype:xls OR filetype:ppt OR filetype:csv', "type": "Docs/Collab", "why": "Documents with the org name/domain."}, {"q": f"site:{d} inurl:login OR inurl:admin OR inurl:signup", "type": "Footprinting", "why": "Auth surfaces (discovery only)."}, {"q": f'site:{d} intitle:"index of"', "type": "Directory/Index", "why": "Open directory listings on that domain."}, {"q": f"site:{d} ext:env OR ext:.git OR ext:git-credentials OR ext:sql OR ext:log", "type": "Credentials/Secrets", "why": "Common secret-bearing file extensions."}, {"q": f'"{d}" breach OR leak OR "data exposure"', "type": "Exposure/Leak", "why": "Press and trackers mentioning exposures."}, ] def typed_dorks_for_ip(ip: str) -> List[TypedDork]: return [ {"q": f'"{ip}"', "type": "Footprinting", "why": "Places where the raw IP is printed or logged."}, {"q": f'intext:"{ip}"', "type": "Footprinting", "why": "Body text mentions (forums, logs)."}, {"q": f'"{ip}" filetype:log OR filetype:txt', "type": "Directory/Index", "why": "Exposed logs referencing the IP."}, {"q": f'"{ip}" blacklist OR abuse', "type": "Incidents/Risk", "why": "Blacklist/abuse mentions and reports."}, {"q": f'"{ip}" intitle:"index of"', "type": "Directory/Index", "why": "Open indexes listing files with that IP."}, ] def typed_dorks_for_username(u: str) -> List[TypedDork]: return [ {"q": f'"{u}"', "type": "Footprinting", "why": "Exact handle mentions across the web."}, {"q": f'"{u}" site:twitter.com OR site:x.com OR site:reddit.com OR site:github.com OR site:stackexchange.com', "type": "Social Activity", "why": "Find consistent identity across major platforms."}, {"q": f'"{u}" site:medium.com OR site:substack.com', "type": "People/Profiles", "why": "Author pages tied to the handle."}, {"q": f'"{u}" site:keybase.io', "type": "People/Profiles", "why": "Cryptographic identity/proofs."}, {"q": f'"{u}" inurl:users OR inurl:profile', "type": "Footprinting", "why": "Generic user profile URLs."}, {"q": f'"{u}" filetype:pdf resume OR "curriculum vitae"', "type": "People/Profiles", "why": "CVs/resumes listing the handle."}, {"q": f'"{u}" AROUND(3) email', "type": "People/Profiles", "why": "Correlate handle to emails in bios/posts."}, {"q": f'"{u}" avatar OR "profile photo"', "type": "People/Profiles", "why": "Images tied to the identity."}, ] def typed_dorks_for_person(name: str) -> List[TypedDork]: return [ {"q": f'"{name}"', "type": "Footprinting", "why": "Exact full-name mentions."}, {"q": f'"{name}" site:linkedin.com', "type": "People/Profiles", "why": "Primary professional profile."}, {"q": f'"{name}" filetype:pdf resume OR "curriculum vitae"', "type": "People/Profiles", "why": "Resume/CV documents."}, {"q": f'"{name}" conference OR talk OR keynote', "type": "People/Profiles", "why": "Speaker bios and conference pages."}, {"q": f'"{name}" site:github.com OR site:gitlab.com', "type": "Code/Repo", "why": "Developer activity tied to the name."}, {"q": f'"{name}" site:researchgate.net OR site:scholar.google.com', "type": "Academic/Research", "why": "Scholarly output."}, {"q": f'"{name}" site:medium.com OR site:substack.com', "type": "People/Profiles", "why": "Editorial/social writing."}, {"q": f'"{name}" "email" OR "contact"', "type": "People/Profiles", "why": "Pages listing contact info."}, ] def typed_dorks_for_org(org: str) -> List[TypedDork]: return [ {"q": f'"{org}" site:sec.gov OR site:edgar', "type": "Regulatory/Legal", "why": "Official SEC/EDGAR filings."}, {"q": f'"{org}" contract award OR RFP OR "sources sought"', "type": "Regulatory/Legal", "why": "Gov procurement history and notices."}, {"q": f'"{org}" breach OR incident OR "data exposure"', "type": "Incidents/Risk", "why": "News/trackers about incidents/leaks."}, {"q": f'"{org}" site:linkedin.com', "type": "People/Profiles", "why": "Employees and org page."}, {"q": f'"{org}" site:github.com OR site:gitlab.com', "type": "Code/Repo", "why": "Public repos under org name."}, {"q": f'"{org}" filetype:pdf OR filetype:doc OR filetype:ppt OR filetype:xls', "type": "Docs/Collab", "why": "Documents carrying org name."}, {"q": f'"{org}" site:docs.google.com OR site:trello.com', "type": "Docs/Collab", "why": "Potentially exposed docs/boards."}, ] def typed_dorks_for_location(loc: str) -> List[TypedDork]: return [ {"q": f'"{loc}" incident OR protest OR outage', "type": "Incidents/Risk", "why": "Events/incidents tied to the place."}, {"q": f'"{loc}" satellite imagery OR "before after"', "type": "Footprinting", "why": "Imagery context for geospatial checks."}, {"q": f'"{loc}" site:news', "type": "Incidents/Risk", "why": "Recent news mentions for the place."}, {"q": f'"{loc}" filetype:pdf report', "type": "Docs/Collab", "why": "Reports that reference the location."}, ] def typed_dorks_for_file(desc: str) -> List[TypedDork]: return [ {"q": f'"{desc}" filetype:pdf OR filetype:doc OR filetype:xls OR filetype:ppt OR filetype:csv', "type": "Docs/Collab", "why": "Document hunting by keyword."}, {"q": f'"{desc}" site:archive.org', "type": "Docs/Collab", "why": "Wayback/Archive artifacts."}, {"q": f'"{desc}" intitle:"index of"', "type": "Directory/Index", "why": "Open listings that may contain files."}, ] TYPED_DORK_MAP: Dict[str, Any] = { "Email Address": typed_dorks_for_email, "Domain / Website": typed_dorks_for_domain, "IP Address": typed_dorks_for_ip, "Username / Handle": typed_dorks_for_username, "Named Individual": typed_dorks_for_person, "Organization / Company": typed_dorks_for_org, "Location": typed_dorks_for_location, "File / Image": typed_dorks_for_file, } # --------------------------- # STEP 1: Explainer # --------------------------- def render_dorks_explainer(entity_type: str, entity_value: str): st.subheader("Step 1: Dork Explainer") st.caption("These are categorized OSINT search operators. Copy/paste into Google if you like; this app automates via DuckDuckGo to respect ToS.") with st.expander("Dork categories explained", expanded=False): for t, desc in DORK_TYPES.items(): st.markdown(f"**{t}** — {desc}") builder = TYPED_DORK_MAP.get(entity_type) typed = builder(entity_value) if (builder and entity_value) else [] if not typed: st.info("Enter an entity value above to see a tailored catalog.") return for d in typed: st.markdown(f"- **[{d['type']}]** `{d['q']}`") st.markdown(f" {d['why']}", unsafe_allow_html=True) # --------------------------- # STEP 2: Advisor (LLM-powered with rules fallback) # --------------------------- # Goal weights for rules-based fallback / blending GOAL_WEIGHTS: Dict[str, Dict[str, int]] = { "Map footprint / surface": {"Footprinting": 3, "Directory/Index": 2}, "Find documents & spreadsheets": {"Docs/Collab": 3, "Directory/Index": 2}, "Discover code & credentials": {"Code/Repo": 3, "Credentials/Secrets": 3, "Directory/Index": 2}, "Identify breaches/leaks": {"Exposure/Leak": 3, "Credentials/Secrets": 2}, "Find people & org info": {"People/Profiles": 3, "Regulatory/Legal": 2}, "Track incidents / risk": {"Incidents/Risk": 3}, "Academic/technical trails": {"Academic/Research": 3}, } DEFAULT_GOALS = list(GOAL_WEIGHTS.keys()) MODEL_ID_MAP = { "qwen2.5-1.5b-instruct": "Qwen/Qwen2.5-1.5B-Instruct", "phi-3-mini-4k-instruct": "microsoft/phi-3-mini-4k-instruct", "gemma-2-2b-it": "google/gemma-2-2b-it", } # --------------------------- # Known Facts Model # --------------------------- @dataclass class KnownFacts: handles: List[str] real_names: List[str] emails: List[str] domains: List[str] ips: List[str] locations: List[str] orgs: List[str] context: str @classmethod def from_session(cls) -> "KnownFacts": return st.session_state.get("known_facts") or cls([], [], [], [], [], [], [], "") def _parse_csv(s: str) -> List[str]: return [x.strip() for x in (s or "").split(",") if x.strip()] def _known_facts_ui(): st.subheader("Known Facts / Prior Intelligence") st.caption("Provide what you already know. This seeds scoring & generation.") col_a, col_b, col_c = st.columns(3) with col_a: handles = st.text_area("Handles / Usernames (comma)", key="kf_handles", height=70) emails = st.text_area("Emails (comma)", key="kf_emails", height=70) ips = st.text_area("IP addresses (comma)", key="kf_ips", height=70) with col_b: real_names = st.text_area("Real Names (comma)", key="kf_real_names", height=70, help="Full names or key name variants") domains = st.text_area("Domains (comma)", key="kf_domains", height=70) orgs = st.text_area("Organizations (comma)", key="kf_orgs", height=70) with col_c: locations = st.text_area("Locations (comma)", key="kf_locations", height=70) context = st.text_area("Context / Keywords", key="kf_context", height=160, help="Free-text mission context, tech stack, roles, etc.") if st.button("Save Known Facts", key="btn_save_facts"): facts = KnownFacts( handles=_parse_csv(handles), real_names=_parse_csv(real_names), emails=_parse_csv(emails), domains=_parse_csv(domains), ips=_parse_csv(ips), locations=_parse_csv(locations), orgs=_parse_csv(orgs), context=context.strip(), ) st.session_state["known_facts"] = facts st.success("Facts saved (session only).") facts = KnownFacts.from_session() st.markdown(f"**Current facts loaded:** {len(facts.handles)} handles, {len(facts.emails)} emails, {len(facts.domains)} domains, {len(facts.real_names)} names.") st.markdown("---") st.markdown("### Candidate Generation") st.caption("Generate permutations / derived candidates from known facts.") if st.button("Generate Candidates", key="btn_gen_candidates"): facts = KnownFacts.from_session() usernames = set(facts.handles) # simple mutations for h in list(usernames): for suf in ["123", "01", "_sec", "_research", "-dev"]: usernames.add(h + suf) if h.isalpha(): usernames.add(h + "1") # email permutations (if have names + domains) emails = set(facts.emails) if facts.real_names and facts.domains: first = facts.real_names[0].split()[0].lower() last = facts.real_names[0].split()[-1].lower() for d in facts.domains[:3]: emails.update({ f"{first}.{last}@{d}", f"{first}{last}@{d}", f"{first[0]}{last}@{d}", f"{first}_{last}@{d}", }) # domain variants (very light) dom_vars = set(facts.domains) for d in facts.domains: if d.count('.') >= 1: root = d.split('.')[0] tld = d.split('.')[-1] dom_vars.add(root + "-dev." + tld) dom_vars.add(root + "-staging." + tld) st.session_state["generated_candidates"] = { "usernames": sorted(list(usernames))[:100], "emails": sorted(list(emails))[:100], "domains": sorted(list(dom_vars))[:100] } st.success("Candidates generated.") cand = st.session_state.get("generated_candidates") if cand: st.write("Usernames (sample)", cand["usernames"][:10]) st.write("Emails (sample)", cand["emails"][:10]) st.write("Domains (sample)", cand["domains"][:10]) if st.button("Add All Candidates to Facts", key="btn_add_cand"): facts = KnownFacts.from_session() facts.handles = sorted(list(set(facts.handles + cand["usernames"]))) facts.emails = sorted(list(set(facts.emails + cand["emails"]))) facts.domains = sorted(list(set(facts.domains + cand["domains"]))) st.session_state["known_facts"] = facts st.success("Candidates merged into facts.") def _generate_investigation_plan(entity_type: str, entity_value: str, facts: KnownFacts) -> Dict[str, Any]: """Produce a structured investigation plan based on current facts and target type.""" objectives = [ "Establish definitive identifiers (emails, handles, domains) to anchor pivots", "Map exposed surface (sites, code, documents, credentials indicators)", "Correlate identities across platforms and artifacts", "Identify signs of exposure, breach, or sensitive data leakage", "Prioritize high-confidence findings for deeper manual review" ] # Gap analysis gaps = [] if not facts.emails: gaps.append("No confirmed email addresses") if not facts.handles: gaps.append("No social/developer handles") if not facts.domains and entity_type != "Domain / Website": gaps.append("No related domains captured") if not facts.real_names and entity_type in ("Named Individual", "Organization / Company"): gaps.append("No individual name variants") if not facts.orgs and entity_type == "Named Individual": gaps.append("No employing organizations") if not facts.context: gaps.append("Context / mission keywords empty (reduces scoring nuance)") if not gaps: gaps = ["Current fact set sufficient for first enumeration pass"] # Phase recommendations phases: List[Dict[str, Any]] = [] phases.append({ "phase": "Phase 1 - Baseline & Fact Hardening", "goals": ["Normalize entity value", "Collect canonical facts", "Note obvious pivots"], "actions": [ "Record primary identifier in Known Facts", "Add any immediately known emails, domains, handles", "Capture mission / context keywords (tech stack, industry, roles)", "Run Advisor for broad Footprinting and People queries" ] }) phases.append({ "phase": "Phase 2 - Surface Enumeration", "goals": ["Map public assets", "Discover documents & code"], "actions": [ "Select dorks: site:, filetype:, intitle:'index of' variations", "Enumerate repo references (GitHub/GitLab) and note unique strings", "Pull down high-signal docs (PDF/DOCX) and extract metadata for hidden emails/handles" ] }) phases.append({ "phase": "Phase 3 - Identity Correlation", "goals": ["Link handles to emails", "Find cross-platform reuse"], "actions": [ "Search handles with platform-specific queries (social + developer)", "Leverage resume / CV / speaker page dorks for name-email alignment", "Add newly confirmed identifiers back into Known Facts and re-score" ] }) phases.append({ "phase": "Phase 4 - Exposure & Risk Signals", "goals": ["Detect leak indicators", "Prioritize potential sensitive exposure"], "actions": [ "Run leak / breach / paste oriented dorks including credential keywords", "Inspect any pastebin / gist / artifact snippets for policy or secret references", "Flag findings with multiple co-occurring identifiers for manual escalation" ] }) phases.append({ "phase": "Phase 5 - Consolidation & Reporting", "goals": ["Score & rank findings", "Produce exportable report"], "actions": [ "Re-score after final fact enrichment", "Visualize graph to ensure high-score nodes connect multiple anchors", "Export HTML report and retain audit log", "Document residual gaps & next potential pivots (e.g., historical archives, certificate transparency)" ] }) return { "entity_type": entity_type, "entity_value": entity_value, "objectives": objectives, "gaps": gaps, "phases": phases, "facts_snapshot": facts.__dict__, } def render_investigation_plan(entity_type: str, entity_value: str): st.subheader("Investigation Plan") facts = KnownFacts.from_session() plan = _generate_investigation_plan(entity_type, entity_value, facts) st.markdown("### Core Objectives") for o in plan["objectives"]: st.markdown(f"- {o}") st.markdown("### Current Gaps") for g in plan["gaps"]: st.markdown(f"- {g}") st.markdown("### Phased Approach") for ph in plan["phases"]: with st.expander(ph["phase"], expanded=False): st.markdown("**Goals**") for g in ph["goals"]: st.markdown(f"- {g}") st.markdown("**Actions**") for a in ph["actions"]: st.markdown(f"- {a}") if st.button("Export Plan (Markdown)", key="btn_export_plan"): md_lines = [f"# Investigation Plan: {plan['entity_type']} — {plan['entity_value']}", "", "## Objectives"] md_lines += [f"- {o}" for o in plan["objectives"]] md_lines += ["", "## Gaps"] + [f"- {g}" for g in plan["gaps"]] md_lines += ["", "## Phases"] for ph in plan["phases"]: md_lines.append(f"### {ph['phase']}") md_lines.append("**Goals**") md_lines += [f"- {g}" for g in ph["goals"]] md_lines.append("**Actions**") md_lines += [f"- {a}" for a in ph["actions"]] md_lines.append("") md = "\n".join(md_lines) st.download_button("Download Plan", md, file_name="investigation_plan.md", mime="text/markdown") def _score_dork_rule(d: TypedDork, goals: List[str], user_note: str) -> float: s = 1.0 for g in goals: for cat, w in GOAL_WEIGHTS.get(g, {}).items(): if d["type"] == cat: s += w note = (user_note or "").lower() if any(k in note for k in ["password", "credential", "secret", "token"]): if d["type"] in {"Credentials/Secrets", "Code/Repo", "Directory/Index"}: s += 1.5 if any(k in note for k in ["resume", "cv", "employee", "contact"]): if d["type"] in {"People/Profiles"}: s += 1.0 if any(k in note for k in ["breach", "leak", "dump", "paste"]): if d["type"] in {"Exposure/Leak", "Credentials/Secrets"}: s += 1.5 if any(k in note for k in ["paper", "research", "doi", "citation"]): if d["type"] in {"Academic/Research"}: s += 1.0 return s def _recommend_rules(entity_type: str, entity_value: str, goals: List[str], user_note: str, top_k: int = 10) -> List[TypedDork]: builder = TYPED_DORK_MAP.get(entity_type) typed = builder(entity_value) if (builder and entity_value) else [] ranked = sorted(typed, key=lambda d: _score_dork_rule(d, goals, user_note), reverse=True) return ranked[:top_k] def _safe_json_list(txt: str) -> List[Dict[str, Any]]: """Best-effort extraction of a JSON list from raw LLM text or user input. Strategy: 1. Strip surrounding markdown code fences (with or without language tag). 2. Attempt direct json.loads. 3. Locate outermost '[' ... ']' span and attempt parse. Returns [] on any failure or non-list root. """ if not txt: return [] s = txt.strip() # Remove markdown fences like ```json ... ``` if s.startswith("```"): lines = s.split("\n") # drop first fence line lines = lines[1:] # drop trailing fence line if present if lines and lines[-1].strip() == "```": lines = lines[:-1] s = "\n".join(lines).strip() # Try direct parse try: data = json.loads(s) if isinstance(data, list): return data # type: ignore[return-value] except Exception: pass # Fallback: largest bracketed list slice start = s.find("[") end = s.rfind("]") if start != -1 and end != -1 and end > start: candidate = s[start:end+1] try: data = json.loads(candidate) if isinstance(data, list): return data # type: ignore[return-value] except Exception: pass return [] def _hf_infer(model_id: str, prompt: str, max_new_tokens: int = 384, temperature: float = 0.2) -> Optional[str]: """Call Hugging Face Inference API if token & requests available. Returns generated text or None (which triggers rule-based fallback).""" if requests is None: st.warning("'requests' not installed; cannot call Hugging Face Inference API. Falling back to rules.") return None api_token = os.getenv("HF_API_TOKEN") if not api_token: st.warning("HF_API_TOKEN not set. Add it as a secret/environment variable to enable LLM advisor. Falling back to rules.") return None url = f"https://api-inference.huggingface.co/models/{model_id}" headers = {"Authorization": f"Bearer {api_token}"} payload = { "inputs": prompt, "parameters": { "max_new_tokens": max_new_tokens, "temperature": temperature, "return_full_text": False, }, } try: resp = requests.post(url, headers=headers, json=payload, timeout=90) resp.raise_for_status() data = resp.json() if isinstance(data, list) and data and isinstance(data[0], dict) and "generated_text" in data[0]: return data[0]["generated_text"] if isinstance(data, dict) and "generated_text" in data: return data["generated_text"] # Unknown shape: return serialized return json.dumps(data) except Exception as e: st.warning(f"HF inference error: {e}. Falling back to rules.") return None def _build_llm_prompt(entity_type: str, entity_value: str, goals: List[str], hint: str, baseline: List[TypedDork], top_k: int) -> str: cat_list = ", ".join(sorted(DORK_TYPES.keys())) baseline_lines = "\n".join([f"- {d['type']}: {d['q']} // {d['why']}" for d in baseline[:25]]) return f""" You are an OSINT assistant that crafts focused Google dorks. Given the entity type and value, the user's goals, and an optional hint, return a JSON array (and ONLY a JSON array) of up to {top_k} objects with this schema: {{"q": "Entity Type: {{ entity_type }}
Entity Value: {{ entity_value }}
Generated: {{ generated }} UTC
{{ facts_json }}
| Score | Level | Title | URL | Reliability | Explanation |
|---|---|---|---|---|---|
| {{ f.score }} | {{ f.level }} | {{ f.title }} | link | {{ f.reliability }} | {{ f.explanation }} |
| Score | Level | Title | URL | Reliab. | Explanation (truncated) |
|---|
You: {turn['user']}
", unsafe_allow_html=True) st.markdown(f"Inv: {turn['assistant']}
", unsafe_allow_html=True) else: st.markdown("Need a lead? Ask me about dorks, scoring, or pivots.
", unsafe_allow_html=True) # Input form with st.form("chat_form", clear_on_submit=True): q = st.text_area("Message", key="chat_input_area", height=70, label_visibility="collapsed") col_a, col_b, col_c, col_d = st.columns(4) send = False with col_a: if st.form_submit_button("Send"): send = True with col_b: if st.form_submit_button("Dorks"): q = "What dorks should I run next?"; send = True with col_c: if st.form_submit_button("Confidence"): q = "How do I improve confidence now?"; send = True with col_d: if st.form_submit_button("Pivot"): q = "Give me a pivot strategy."; send = True if send and q.strip(): reply: Optional[str] = None if st.session_state.get("settings", {}).get("model") and os.getenv("HF_API_TOKEN"): convo = st.session_state["chat_history"][-6:] history_str = "\n".join([f"User: {h['user']}\nAssistant: {h['assistant']}" for h in convo if h.get('assistant')]) prompt = ( f"{GUIDE_SYSTEM}\nCurrentContext: {_summarize_context(entity_type, entity_value)}\n" + history_str + f"\nUser: {q}\nAssistant:") reply = _hf_infer(MODEL_ID_MAP.get(st.session_state["settings"]["model"], st.session_state["settings"]["model"]), prompt, max_new_tokens=190, temperature=0.35) if not reply: reply = _rule_based_reply(q, entity_type, entity_value) st.session_state["chat_history"].append({"user": q, "assistant": reply}) st.markdown("