279 lines
8.1 KiB
Python
279 lines
8.1 KiB
Python
|
|
"""
|
||
|
|
TechScout CLI
|
||
|
|
|
||
|
|
Command-line interface for technology scouting.
|
||
|
|
"""
|
||
|
|
|
||
|
|
import argparse
|
||
|
|
import logging
|
||
|
|
import sys
|
||
|
|
import json
|
||
|
|
from pathlib import Path
|
||
|
|
|
||
|
|
from .config import config
|
||
|
|
from .pipeline.discovery import DiscoveryPipeline
|
||
|
|
from .pipeline.deep_dive import DeepDivePipeline
|
||
|
|
from .extraction.llm_client import OllamaClient
|
||
|
|
|
||
|
|
# Module-wide logging configuration: timestamped INFO-level records for
# every logger in the package (applies on first import of this module).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Logger scoped to this module's dotted import path.
logger = logging.getLogger(__name__)
|
def cmd_discover(args):
    """Run Phase 1 discovery.

    Decomposes the capability-gap query, searches the configured sources,
    and prints a ranked summary of candidate technologies.

    Args:
        args: Parsed CLI namespace with ``query``, ``model``,
            ``max_results``, and ``fast`` attributes.
    """
    print(f"\n{'='*60}")
    print("TECHSCOUT - Phase 1: Discovery")
    print(f"{'='*60}\n")

    pipeline = DiscoveryPipeline(model=args.model)

    print(f"Capability Gap: {args.query}\n")
    print("Starting discovery process...\n")

    result = pipeline.discover(
        capability_gap=args.query,
        max_results=args.max_results,
        use_llm_scoring=not args.fast  # --fast skips the slower LLM re-scoring pass
    )

    if not result.success:
        print(f"\nError: {result.error}")
        return

    # Display results
    print(f"\n{'='*60}")
    print("DISCOVERY RESULTS")
    print(f"{'='*60}\n")

    print("Query Understanding:")
    print(f" {result.decomposition.get('understanding', 'N/A')}\n")

    print(f"Technical Domains: {', '.join(result.decomposition.get('technical_domains', []))}\n")

    print("Source Statistics:")
    for source, count in result.source_stats.items():
        print(f" - {source}: {count} results")

    print(f"\nTotal Results: {result.total_results_found}")
    print(f"Search Time: {result.search_duration_seconds:.1f}s\n")

    # BUGFIX: only the top 20 candidates are displayed, so the header must
    # count the displayed slice — the original printed the full candidate
    # count, overstating the list whenever more than 20 were found.
    top_candidates = result.candidates[:20]
    print(f"TOP {len(top_candidates)} CANDIDATES:")
    print("-" * 60)

    for i, candidate in enumerate(top_candidates, 1):
        print(f"\n{i}. {candidate.title[:70]}")
        print(f" Organization: {candidate.organization}")
        print(f" Source: {candidate.source_type} | Score: {candidate.score:.2f}")
        if candidate.trl_estimate:
            print(f" TRL: {candidate.trl_estimate}")
        if candidate.award_amount:
            print(f" Award: ${candidate.award_amount:,.0f}")
        # BUGFIX: append "..." only when the URL was actually truncated;
        # the original added it unconditionally, implying truncation that
        # may not have happened.
        url = candidate.url
        print(f" URL: {url[:60]}{'...' if len(url) > 60 else ''}")

    print(f"\n\nResults saved to: {config.analyses_dir / f'discovery_{result.id}.json'}")
|
def cmd_deepdive(args):
    """Run Phase 2 deep dive.

    Performs a detailed analysis of one organization/technology pair and
    prints the company profile, technology profile, contract history,
    assessment, risks, and recommendation.

    Args:
        args: Parsed CLI namespace with ``organization``, ``technology``,
            ``gap``, and ``model`` attributes.
    """
    print(f"\n{'='*60}")
    print("TECHSCOUT - Phase 2: Deep Dive")
    print(f"{'='*60}\n")

    pipeline = DeepDivePipeline(model=args.model)

    print(f"Organization: {args.organization}")
    print(f"Technology: {args.technology}\n")
    print("Starting deep dive analysis...\n")

    result = pipeline.deep_dive(
        organization=args.organization,
        technology_context=args.technology,
        capability_gap=args.gap
    )

    if not result.success:
        print(f"\nError: {result.error}")
        return

    # Display results
    print(f"\n{'='*60}")
    print("DEEP DIVE RESULTS")
    print(f"{'='*60}\n")

    print("COMPANY PROFILE")
    print("-" * 40)
    print(f"Name: {result.company_profile.name}")
    print(f"Description: {result.company_profile.description}")
    print(f"Headquarters: {result.company_profile.headquarters or 'Unknown'}")
    print(f"Founded: {result.company_profile.founded or 'Unknown'}")
    if result.company_profile.leadership:
        print("Leadership:")
        for leader in result.company_profile.leadership[:3]:
            print(f" - {leader.get('name', 'Unknown')}: {leader.get('title', 'Unknown')}")

    print("\nTECHNOLOGY PROFILE")
    print("-" * 40)
    print(f"Technology: {result.technology_profile.name}")
    print(f"TRL Assessment: {result.technology_profile.trl_assessment}")
    # BUGFIX: append "..." only when the approach text was actually
    # truncated; the original added it unconditionally.
    approach = result.technology_profile.technical_approach
    print(f"Approach: {approach[:200]}{'...' if len(approach) > 200 else ''}")
    print("Key Capabilities:")
    for cap in result.technology_profile.key_capabilities[:5]:
        print(f" - {cap}")
    print(f"Competitive Advantage: {result.technology_profile.competitive_advantage}")

    print("\nCONTRACT HISTORY")
    print("-" * 40)
    print(f"Total Contracts: {result.contract_history.total_contracts}")
    print(f"Total Value: ${result.contract_history.total_value:,.0f}")
    print(f"Primary Agencies: {', '.join(result.contract_history.primary_agencies[:3])}")
    print(f"SBIR Awards: {len(result.contract_history.sbir_awards)}")

    print("\nASSESSMENT")
    print("-" * 40)
    print(result.assessment)

    print("\nRISK FACTORS")
    for risk in result.risk_factors:
        print(f" - {risk}")

    print(f"\nRECOMMENDATION: {result.recommendation}")

    print(f"\n\nResults saved to: {config.analyses_dir / f'deepdive_{result.id}.json'}")
|
def cmd_list(args):
    """List saved analyses.

    Scans the configured analyses directory for Phase 1 (``discovery_*``)
    and Phase 2 (``deepdive_*``) JSON files and prints a one-line summary
    for the ten most recent of each.

    Args:
        args: Parsed CLI namespace (no attributes are used).
    """

    def _load(path):
        """Read one analysis file; return its dict or None if unreadable."""
        # ROBUSTNESS: the original let a single corrupt/unreadable JSON
        # file abort the entire listing with a traceback.
        try:
            with open(path) as fp:
                return json.load(fp)
        except (OSError, json.JSONDecodeError):
            return None

    print("\nSaved Analyses:")
    print("-" * 60)

    discoveries = list(config.analyses_dir.glob("discovery_*.json"))
    deep_dives = list(config.analyses_dir.glob("deepdive_*.json"))

    if discoveries:
        print("\nPhase 1 - Discovery:")
        for f in sorted(discoveries, reverse=True)[:10]:
            data = _load(f)
            if data is None:
                print(f" {f.stem}: <unreadable>")
                continue
            gap = data.get('capability_gap', 'Unknown')
            # Append "..." only when the summary was actually truncated.
            print(f" {f.stem}: {gap[:50]}{'...' if len(gap) > 50 else ''}")

    if deep_dives:
        print("\nPhase 2 - Deep Dive:")
        for f in sorted(deep_dives, reverse=True)[:10]:
            data = _load(f)
            if data is None:
                print(f" {f.stem}: <unreadable>")
                continue
            print(f" {f.stem}: {data.get('organization', 'Unknown')}")

    if not discoveries and not deep_dives:
        print(" No analyses found.")
|
def cmd_check(args):
    """Check system status.

    Reports whether the Ollama server is reachable (listing its models if
    so) and whether the configured output directories exist.

    Args:
        args: Parsed CLI namespace (no attributes are used).
    """
    print("\nTechScout System Check")
    print("-" * 40)

    # Probe the local LLM server first.
    llm = OllamaClient()
    if llm.is_available():
        print("Ollama: RUNNING")
        available = llm.list_models()
        print(f" Available models: {', '.join(available[:5])}")
    else:
        print("Ollama: NOT RUNNING")
        print(" Please start Ollama: ollama serve")

    # Verify the output directories the pipelines write into.
    print("\nDirectories:")
    for label, directory in (
        ("Analyses", config.analyses_dir),
        ("Exports", config.exports_dir),
    ):
        state = 'exists' if directory.exists() else 'MISSING'
        print(f" {label}: {directory} ({state})")
|
def main():
    """Parse command-line arguments and dispatch to the chosen subcommand.

    Subcommands: ``discover`` (Phase 1), ``deepdive`` (Phase 2), ``list``
    (saved analyses), ``check`` (system status). Prints help when no
    command is given.
    """
    parser = argparse.ArgumentParser(
        description="TechScout - Technology Scouting & Capability Gap Analysis"
    )
    subparsers = parser.add_subparsers(dest="command", help="Commands")

    # Discover command
    p_discover = subparsers.add_parser(
        "discover",
        help="Phase 1: Discover technologies for a capability gap",
    )
    p_discover.add_argument(
        "query",
        help="Natural language capability gap description",
    )
    p_discover.add_argument(
        "--model", "-m",
        default="mistral-nemo:12b",
        help="Ollama model to use",
    )
    p_discover.add_argument(
        "--max-results", "-n",
        type=int,
        default=50,
        help="Maximum candidates to return",
    )
    p_discover.add_argument(
        "--fast",
        action="store_true",
        help="Skip LLM scoring for faster results",
    )

    # Deep dive command
    p_deepdive = subparsers.add_parser(
        "deepdive",
        help="Phase 2: Deep dive into a company/technology",
    )
    p_deepdive.add_argument(
        "organization",
        help="Company name to investigate",
    )
    p_deepdive.add_argument(
        "--technology", "-t",
        required=True,
        help="Technology context/description",
    )
    p_deepdive.add_argument(
        "--gap", "-g",
        required=True,
        help="Original capability gap",
    )
    p_deepdive.add_argument(
        "--model", "-m",
        default="mistral-nemo:12b",
        help="Ollama model to use",
    )

    # List command
    subparsers.add_parser(
        "list",
        help="List saved analyses",
    )

    # Check command
    subparsers.add_parser(
        "check",
        help="Check system status",
    )

    args = parser.parse_args()

    # Table-driven dispatch instead of an if/elif chain; unknown or
    # missing commands fall through to the help text.
    handlers = {
        "discover": cmd_discover,
        "deepdive": cmd_deepdive,
        "list": cmd_list,
        "check": cmd_check,
    }
    handler = handlers.get(args.command)
    if handler is not None:
        handler(args)
    else:
        parser.print_help()
|
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()