diff --git a/README.md b/README.md index 8789791..c920367 100644 --- a/README.md +++ b/README.md @@ -8,14 +8,15 @@ Command-line interface for Firecrawl. Scrape, crawl, and extract data from any w npm install -g firecrawl-cli ``` -Or set up everything in one command (install CLI globally, authenticate, and add skills): +Or set up everything in one command (install CLI globally, authenticate, and add skills across all detected coding editors): ```bash -npx -y firecrawl-cli@latest init --all --browser +npx -y firecrawl-cli@latest init -y --browser ``` -- `--all` installs the firecrawl skill to every detected AI coding agent +- `-y` runs setup non-interactively - `--browser` opens the browser for Firecrawl authentication automatically +- skills install globally to every detected AI coding agent by default ### Setup Skills and MCP @@ -25,6 +26,8 @@ If you are using an AI coding agent like Claude Code, you can also install the s firecrawl setup skills ``` +This installs skills globally across all detected coding editors by default. Use `--agent ` to scope it to one editor. + To install the Firecrawl MCP server into your editors (Cursor, Claude Code, VS Code, etc.): ```bash @@ -34,7 +37,7 @@ firecrawl setup mcp Or directly via npx: ```bash -npx skills add firecrawl/cli +npx skills add firecrawl/cli --full-depth --global --all npx add-mcp "npx -y firecrawl-mcp" --name firecrawl ``` diff --git a/skills/firecrawl-agent/SKILL.md b/skills/firecrawl-agent/SKILL.md new file mode 100644 index 0000000..0b3577f --- /dev/null +++ b/skills/firecrawl-agent/SKILL.md @@ -0,0 +1,57 @@ +--- +name: firecrawl-agent +description: | + AI-powered autonomous data extraction that navigates complex sites and returns structured JSON. Use this skill when the user wants structured data from websites, needs to extract pricing tiers, product listings, directory entries, or any data as JSON with a schema. 
Triggers on "extract structured data", "get all the products", "pull pricing info", "extract as JSON", or when the user provides a JSON schema for website data. More powerful than simple scraping for multi-page structured extraction. +allowed-tools: + - Bash(firecrawl *) + - Bash(npx firecrawl *) +--- + +# firecrawl agent + +AI-powered autonomous extraction. The agent navigates sites and extracts structured data (takes 2-5 minutes). + +## When to use + +- You need structured data from complex multi-page sites +- Manual scraping would require navigating many pages +- You want the AI to figure out where the data lives + +## Quick start + +```bash +# Extract structured data +firecrawl agent "extract all pricing tiers" --wait -o .firecrawl/pricing.json + +# With a JSON schema for structured output +firecrawl agent "extract products" --schema '{"type":"object","properties":{"name":{"type":"string"},"price":{"type":"number"}}}' --wait -o .firecrawl/products.json + +# Focus on specific pages +firecrawl agent "get feature list" --urls "" --wait -o .firecrawl/features.json +``` + +## Options + +| Option | Description | +| ---------------------- | ----------------------------------------- | +| `--urls ` | Starting URLs for the agent | +| `--model ` | Model to use: spark-1-mini or spark-1-pro | +| `--schema ` | JSON schema for structured output | +| `--schema-file ` | Path to JSON schema file | +| `--max-credits ` | Credit limit for this agent run | +| `--wait` | Wait for agent to complete | +| `--pretty` | Pretty print JSON output | +| `-o, --output ` | Output file path | + +## Tips + +- Always use `--wait` to get results inline. Without it, returns a job ID. +- Use `--schema` for predictable, structured output — otherwise the agent returns freeform data. +- Agent runs consume more credits than simple scrapes. Use `--max-credits` to cap spending. +- For simple single-page extraction, prefer `scrape` — it's faster and cheaper. 
+ +## See also + +- [firecrawl-scrape](../firecrawl-scrape/SKILL.md) — simpler single-page extraction +- [firecrawl-browser](../firecrawl-browser/SKILL.md) — manual browser automation (more control) +- [firecrawl-crawl](../firecrawl-crawl/SKILL.md) — bulk extraction without AI diff --git a/skills/firecrawl-browser/SKILL.md b/skills/firecrawl-browser/SKILL.md new file mode 100644 index 0000000..ed0f0ae --- /dev/null +++ b/skills/firecrawl-browser/SKILL.md @@ -0,0 +1,107 @@ +--- +name: firecrawl-browser +description: | + Cloud browser automation for pages requiring interaction — clicks, form fills, login, pagination, infinite scroll. Use this skill when the user needs to interact with a webpage, log into a site, click buttons, fill forms, navigate multi-step flows, handle pagination, or when regular scraping fails because content requires JavaScript interaction. Triggers on "click", "fill out the form", "log in to", "paginated", "infinite scroll", "interact with the page", or "scrape failed". Provides remote Chromium sessions with persistent profiles. +allowed-tools: + - Bash(firecrawl *) + - Bash(npx firecrawl *) +--- + +# firecrawl browser + +Cloud Chromium sessions in Firecrawl's remote sandboxed environment. Interact with pages that require clicks, form fills, pagination, or login. 
+ +## When to use + +- Content requires interaction: clicks, form fills, pagination, login +- `scrape` failed because content is behind JavaScript interaction +- You need to navigate a multi-step flow +- Last resort in the [workflow escalation pattern](firecrawl-cli): search → scrape → map → crawl → **browser** +- **Never use browser for web searches** — use `search` instead + +## Quick start + +```bash +# Typical browser workflow +firecrawl browser "open " +firecrawl browser "snapshot -i" # see interactive elements with @ref IDs +firecrawl browser "click @e5" # interact with elements +firecrawl browser "fill @e3 'search query'" # fill form fields +firecrawl browser "scrape" -o .firecrawl/page.md # extract content +firecrawl browser close +``` + +Shorthand auto-launches a session if none exists — no setup required. + +## Commands + +| Command | Description | +| -------------------- | ---------------------------------------- | +| `open ` | Navigate to a URL | +| `snapshot -i` | Get interactive elements with `@ref` IDs | +| `screenshot` | Capture a PNG screenshot | +| `click <@ref>` | Click an element by ref | +| `type <@ref> ` | Type into an element | +| `fill <@ref> ` | Fill a form field (clears first) | +| `scrape` | Extract page content as markdown | +| `scroll ` | Scroll up/down/left/right | +| `wait ` | Wait for a duration | +| `eval ` | Evaluate JavaScript on the page | + +Session management: `launch-session --ttl 600`, `list`, `close` + +## Options + +| Option | Description | +| ---------------------------- | -------------------------------------------------- | +| `--ttl ` | Session time-to-live | +| `--ttl-inactivity ` | Inactivity timeout | +| `--session ` | Use a specific session ID | +| `--profile ` | Use a named profile (persists state) | +| `--no-save-changes` | Read-only reconnect (don't write to session state) | +| `-o, --output ` | Output file path | + +## Profiles + +Profiles survive close and can be reconnected by name. 
Use them for login-then-work flows: + +```bash +# Session 1: Login and save state +firecrawl browser launch-session --profile my-app +firecrawl browser "open https://app.example.com/login" +firecrawl browser "snapshot -i" +firecrawl browser "fill @e3 'user@example.com'" +firecrawl browser "click @e7" +firecrawl browser "wait 2" +firecrawl browser close + +# Session 2: Come back authenticated +firecrawl browser launch-session --profile my-app +firecrawl browser "open https://app.example.com/dashboard" +firecrawl browser "scrape" -o .firecrawl/dashboard.md +firecrawl browser close +``` + +Read-only reconnect (no writes to session state): + +```bash +firecrawl browser launch-session --profile my-app --no-save-changes +``` + +Shorthand with profile: + +```bash +firecrawl browser --profile my-app "open https://example.com" +``` + +## Tips + +- If you get forbidden errors, the session may have expired — create a new one. +- For parallel browser work, launch separate sessions and operate them via `--session `. +- Always `close` sessions when done to free resources. + +## See also + +- [firecrawl-scrape](../firecrawl-scrape/SKILL.md) — try scrape first, escalate to browser only when needed +- [firecrawl-search](../firecrawl-search/SKILL.md) — for web searches (never use browser for searching) +- [firecrawl-agent](../firecrawl-agent/SKILL.md) — AI-powered extraction (less manual control) diff --git a/skills/firecrawl-cli/SKILL.md b/skills/firecrawl-cli/SKILL.md index 7be3f19..eecdfbf 100644 --- a/skills/firecrawl-cli/SKILL.md +++ b/skills/firecrawl-cli/SKILL.md @@ -1,15 +1,7 @@ --- name: firecrawl description: | - Official Firecrawl CLI skill for web scraping, search, crawling, and browser automation. Returns clean, LLM-optimized markdown. - - USE FOR: - - Web search and research - - Scraping pages, docs, and articles - - Site mapping and bulk extraction - - Browser automation for interactive pages - - Must be pre-installed and authenticated. 
See rules/install.md for setup, rules/security.md for output handling. + Web scraping, search, crawling, and browser automation via the Firecrawl CLI. Use this skill whenever the user wants to search the web, find articles, research a topic, look something up online, scrape a webpage, grab content from a URL, extract data from a website, crawl documentation, download a site, or interact with pages that need clicks or logins. Also use when they say "fetch this page", "pull the content from", "get the page at https://", or reference scraping external websites. This provides real-time web search with full page content extraction and cloud browser automation — capabilities beyond what Claude can do natively with built-in tools. Do NOT trigger for local file operations, git commands, deployments, or code editing tasks. allowed-tools: - Bash(firecrawl *) - Bash(npx firecrawl *) @@ -52,16 +44,17 @@ Follow this escalation pattern: 4. **Crawl** - Need bulk content from an entire site section (e.g., all /docs/). 5. **Browser** - Scrape failed because content is behind interaction (pagination, modals, form submissions, multi-step navigation). 
-| Need | Command | When | -| --------------------------- | --------- | --------------------------------------------------------- | -| Find pages on a topic | `search` | No specific URL yet | -| Get a page's content | `scrape` | Have a URL, page is static or JS-rendered | -| Find URLs within a site | `map` | Need to locate a specific subpage | -| Bulk extract a site section | `crawl` | Need many pages (e.g., all /docs/) | -| AI-powered data extraction | `agent` | Need structured data from complex sites | -| Interact with a page | `browser` | Content requires clicks, form fills, pagination, or login | +| Need | Command | When | +| --------------------------- | ---------- | --------------------------------------------------------- | +| Find pages on a topic | `search` | No specific URL yet | +| Get a page's content | `scrape` | Have a URL, page is static or JS-rendered | +| Find URLs within a site | `map` | Need to locate a specific subpage | +| Bulk extract a site section | `crawl` | Need many pages (e.g., all /docs/) | +| AI-powered data extraction | `agent` | Need structured data from complex sites | +| Interact with a page | `browser` | Content requires clicks, form fills, pagination, or login | +| Download a site to files | `download` | Save an entire site as local files | -See also: [`download`](#download) -- a convenience command that combines `map` + `scrape` to save an entire site to local files. +For detailed command reference, use the individual skill for each command (e.g., `firecrawl-search`, `firecrawl-browser`) or run `firecrawl --help`. **Scrape vs browser:** @@ -74,53 +67,6 @@ See also: [`download`](#download) -- a convenience command that combines `map` + - `search --scrape` already fetches full page content. Don't re-scrape those URLs. - Check `.firecrawl/` for existing data before fetching again. 
-**Example: fetching API docs from a large site** - -``` -search "site:docs.example.com authentication API" → found the docs domain -map https://docs.example.com --search "auth" → found /docs/api/authentication -scrape https://docs.example.com/docs/api/auth... → got the content -``` - -**Example: data behind pagination** - -``` -scrape https://example.com/products → only shows first 10 items, no next-page links -browser "open https://example.com/products" → open in browser -browser "snapshot -i" → find the pagination button -browser "click @e12" → click "Next Page" -browser "scrape" -o .firecrawl/products-p2.md → extract page 2 content -``` - -**Example: login then scrape authenticated content** - -``` -browser launch-session --profile my-app → create a named profile -browser "open https://app.example.com/login" → navigate to login -browser "snapshot -i" → find form fields -browser "fill @e3 'user@example.com'" → fill email -browser "click @e7" → click Login -browser "wait 2" → wait for redirect -browser close → disconnect, state persisted - -browser launch-session --profile my-app → reconnect, cookies intact -browser "open https://app.example.com/dashboard" → already logged in -browser "scrape" -o .firecrawl/dashboard.md → extract authenticated content -browser close -``` - -**Example: research task** - -``` -search "firecrawl vs competitors 2024" --scrape -o .firecrawl/search-comparison-scraped.json - → full content already fetched for each result -grep -n "pricing\|features" .firecrawl/search-comparison-scraped.json -head -200 .firecrawl/search-comparison-scraped.json → read and process what you have - → notice a relevant URL in the content -scrape https://newsite.com/comparison -o .firecrawl/newsite-comparison.md - → only scrape this new URL -``` - ## Output & Organization Unless the user specifies to return in context, write results to `.firecrawl/` with `-o`. Add `.firecrawl/` to `.gitignore`. 
Always quote URLs - shell interprets `?` and `&` as special characters. @@ -147,171 +93,6 @@ grep -n "keyword" .firecrawl/file.md Single format outputs raw content. Multiple formats (e.g., `--format markdown,links`) output JSON. -## Commands - -### search - -Web search with optional content scraping. Run `firecrawl search --help` for all options. - -```bash -# Basic search -firecrawl search "your query" -o .firecrawl/result.json --json - -# Search and scrape full page content from results -firecrawl search "your query" --scrape -o .firecrawl/scraped.json --json - -# News from the past day -firecrawl search "your query" --sources news --tbs qdr:d -o .firecrawl/news.json --json -``` - -Options: `--limit `, `--sources `, `--categories `, `--tbs `, `--location`, `--country `, `--scrape`, `--scrape-formats`, `-o` - -### scrape - -Scrape one or more URLs. Multiple URLs are scraped concurrently and each result is saved to `.firecrawl/`. Run `firecrawl scrape --help` for all options. - -```bash -# Basic markdown extraction -firecrawl scrape "" -o .firecrawl/page.md - -# Main content only, no nav/footer -firecrawl scrape "" --only-main-content -o .firecrawl/page.md - -# Wait for JS to render, then scrape -firecrawl scrape "" --wait-for 3000 -o .firecrawl/page.md - -# Multiple URLs (each saved to .firecrawl/) -firecrawl scrape https://firecrawl.dev https://firecrawl.dev/blog https://docs.firecrawl.dev - -# Get markdown and links together -firecrawl scrape "" --format markdown,links -o .firecrawl/page.json -``` - -Options: `-f `, `-H`, `--only-main-content`, `--wait-for `, `--include-tags`, `--exclude-tags`, `-o` - -### map - -Discover URLs on a site. Run `firecrawl map --help` for all options. 
- -```bash -# Find a specific page on a large site -firecrawl map "" --search "authentication" -o .firecrawl/filtered.txt - -# Get all URLs -firecrawl map "" --limit 500 --json -o .firecrawl/urls.json -``` - -Options: `--limit `, `--search `, `--sitemap `, `--include-subdomains`, `--json`, `-o` - -### crawl - -Bulk extract from a website. Run `firecrawl crawl --help` for all options. - -```bash -# Crawl a docs section -firecrawl crawl "" --include-paths /docs --limit 50 --wait -o .firecrawl/crawl.json - -# Full crawl with depth limit -firecrawl crawl "" --max-depth 3 --wait --progress -o .firecrawl/crawl.json - -# Check status of a running crawl -firecrawl crawl -``` - -Options: `--wait`, `--progress`, `--limit `, `--max-depth `, `--include-paths`, `--exclude-paths`, `--delay `, `--max-concurrency `, `--pretty`, `-o` - -### agent - -AI-powered autonomous extraction (2-5 minutes). Run `firecrawl agent --help` for all options. - -```bash -# Extract structured data -firecrawl agent "extract all pricing tiers" --wait -o .firecrawl/pricing.json - -# With a JSON schema for structured output -firecrawl agent "extract products" --schema '{"type":"object","properties":{"name":{"type":"string"},"price":{"type":"number"}}}' --wait -o .firecrawl/products.json - -# Focus on specific pages -firecrawl agent "get feature list" --urls "" --wait -o .firecrawl/features.json -``` - -Options: `--urls`, `--model `, `--schema `, `--schema-file`, `--max-credits `, `--wait`, `--pretty`, `-o` - -### browser - -Cloud Chromium sessions in Firecrawl's remote sandboxed environment. Run `firecrawl browser --help` and `firecrawl browser "agent-browser --help"` for all options. 
- -```bash -# Typical browser workflow -firecrawl browser "open " -firecrawl browser "snapshot -i" # see interactive elements with @ref IDs -firecrawl browser "click @e5" # interact with elements -firecrawl browser "fill @e3 'search query'" # fill form fields -firecrawl browser "scrape" -o .firecrawl/page.md # extract content -firecrawl browser close -``` - -Shorthand auto-launches a session if none exists - no setup required. - -**Core agent-browser commands:** - -| Command | Description | -| -------------------- | ---------------------------------------- | -| `open ` | Navigate to a URL | -| `snapshot -i` | Get interactive elements with `@ref` IDs | -| `screenshot` | Capture a PNG screenshot | -| `click <@ref>` | Click an element by ref | -| `type <@ref> ` | Type into an element | -| `fill <@ref> ` | Fill a form field (clears first) | -| `scrape` | Extract page content as markdown | -| `scroll ` | Scroll up/down/left/right | -| `wait ` | Wait for a duration | -| `eval ` | Evaluate JavaScript on the page | - -Session management: `launch-session --ttl 600`, `list`, `close` - -Options: `--ttl `, `--ttl-inactivity `, `--session `, `--profile `, `--no-save-changes`, `-o` - -**Profiles** survive close and can be reconnected by name. 
Use them when you need to login first, then come back later to do work while already authenticated: - -```bash -# Session 1: Login and save state -firecrawl browser launch-session --profile my-app -firecrawl browser "open https://app.example.com/login" -firecrawl browser "snapshot -i" -firecrawl browser "fill @e3 'user@example.com'" -firecrawl browser "click @e7" -firecrawl browser "wait 2" -firecrawl browser close - -# Session 2: Come back authenticated -firecrawl browser launch-session --profile my-app -firecrawl browser "open https://app.example.com/dashboard" -firecrawl browser "scrape" -o .firecrawl/dashboard.md -firecrawl browser close -``` - -Read-only reconnect (no writes to session state): - -```bash -firecrawl browser launch-session --profile my-app --no-save-changes -``` - -Shorthand with profile: - -```bash -firecrawl browser --profile my-app "open https://example.com" -``` - -If you get forbidden errors in the browser, you may need to create a new session as the old one may have expired. - -### credit-usage - -```bash -firecrawl credit-usage -firecrawl credit-usage --json --pretty -o .firecrawl/credits.json -``` - ## Working with Results These patterns are useful when working with file-based output (`-o` flag) for complex tasks: @@ -337,41 +118,9 @@ wait For browser, launch separate sessions for independent tasks and operate them in parallel via `--session `. -## Bulk Download - -### download - -Convenience command that combines `map` + `scrape` to save a site as local files. Maps the site first to discover pages, then scrapes each one into nested directories under `.firecrawl/`. All scrape options work with download. Always pass `-y` to skip the confirmation prompt. Run `firecrawl download --help` for all options. 
+## Credit Usage ```bash -# Interactive wizard (picks format, screenshots, paths for you) -firecrawl download https://docs.firecrawl.dev - -# With screenshots -firecrawl download https://docs.firecrawl.dev --screenshot --limit 20 -y - -# Multiple formats (each saved as its own file per page) -firecrawl download https://docs.firecrawl.dev --format markdown,links --screenshot --limit 20 -y -# Creates per page: index.md + links.txt + screenshot.png - -# Filter to specific sections -firecrawl download https://docs.firecrawl.dev --include-paths "/features,/sdks" - -# Skip translations -firecrawl download https://docs.firecrawl.dev --exclude-paths "/zh,/ja,/fr,/es,/pt-BR" - -# Full combo -firecrawl download https://docs.firecrawl.dev \ - --include-paths "/features,/sdks" \ - --exclude-paths "/zh,/ja" \ - --only-main-content \ - --screenshot \ - -y +firecrawl credit-usage +firecrawl credit-usage --json --pretty -o .firecrawl/credits.json ``` - -Download options: `--limit `, `--search `, `--include-paths `, `--exclude-paths `, `--allow-subdomains`, `-y` - -Scrape options (all work with download): `-f `, `-H`, `-S`, `--screenshot`, `--full-page-screenshot`, `--only-main-content`, `--include-tags`, `--exclude-tags`, `--wait-for`, `--max-age`, `--country`, `--languages` - - - diff --git a/skills/firecrawl-cli/rules/install.md b/skills/firecrawl-cli/rules/install.md index a3d1aae..6d5379a 100644 --- a/skills/firecrawl-cli/rules/install.md +++ b/skills/firecrawl-cli/rules/install.md @@ -12,10 +12,18 @@ description: | ## Quick Setup (Recommended) ```bash -npx -y firecrawl-cli@1.8.0 init --all --browser +npx -y firecrawl-cli@latest init -y --browser ``` -This installs `firecrawl-cli` globally and authenticates. +This installs `firecrawl-cli` globally, authenticates via browser, and installs all skills. + +Skills are installed globally across all detected coding editors by default. 
+ +To install skills manually: + +```bash +firecrawl setup skills +``` ## Manual Install diff --git a/skills/firecrawl-crawl/SKILL.md b/skills/firecrawl-crawl/SKILL.md new file mode 100644 index 0000000..fb2f3bd --- /dev/null +++ b/skills/firecrawl-crawl/SKILL.md @@ -0,0 +1,58 @@ +--- +name: firecrawl-crawl +description: | + Bulk extract content from an entire website or site section. Use this skill when the user wants to crawl a site, extract all pages from a docs section, bulk-scrape multiple pages following links, or says "crawl", "get all the pages", "extract everything under /docs", "bulk extract", or needs content from many pages on the same site. Handles depth limits, path filtering, and concurrent extraction. +allowed-tools: + - Bash(firecrawl *) + - Bash(npx firecrawl *) +--- + +# firecrawl crawl + +Bulk extract content from a website. Crawls pages following links up to a depth/limit. + +## When to use + +- You need content from many pages on a site (e.g., all `/docs/`) +- You want to extract an entire site section +- Step 4 in the [workflow escalation pattern](firecrawl-cli): search → scrape → map → **crawl** → browser + +## Quick start + +```bash +# Crawl a docs section +firecrawl crawl "" --include-paths /docs --limit 50 --wait -o .firecrawl/crawl.json + +# Full crawl with depth limit +firecrawl crawl "" --max-depth 3 --wait --progress -o .firecrawl/crawl.json + +# Check status of a running crawl +firecrawl crawl +``` + +## Options + +| Option | Description | +| ------------------------- | ------------------------------------------- | +| `--wait` | Wait for crawl to complete before returning | +| `--progress` | Show progress while waiting | +| `--limit ` | Max pages to crawl | +| `--max-depth ` | Max link depth to follow | +| `--include-paths ` | Only crawl URLs matching these paths | +| `--exclude-paths ` | Skip URLs matching these paths | +| `--delay ` | Delay between requests | +| `--max-concurrency ` | Max parallel crawl workers | +| `--pretty` | 
Pretty print JSON output | +| `-o, --output ` | Output file path | + +## Tips + +- Always use `--wait` when you need the results immediately. Without it, crawl returns a job ID for async polling. +- Use `--include-paths` to scope the crawl — don't crawl an entire site when you only need one section. +- Crawl consumes credits per page. Check `firecrawl credit-usage` before large crawls. + +## See also + +- [firecrawl-scrape](../firecrawl-scrape/SKILL.md) — scrape individual pages +- [firecrawl-map](../firecrawl-map/SKILL.md) — discover URLs before deciding to crawl +- [firecrawl-download](../firecrawl-experimental/firecrawl-download/SKILL.md) — download site to local files (uses map + scrape) diff --git a/skills/firecrawl-experimental/SKILL.md b/skills/firecrawl-experimental/SKILL.md new file mode 100644 index 0000000..7ae0dc9 --- /dev/null +++ b/skills/firecrawl-experimental/SKILL.md @@ -0,0 +1,14 @@ +--- +name: firecrawl-experimental +description: | + Experimental Firecrawl commands that are in preview. Use when the user asks about experimental features, preview commands, or specifically requests "download" functionality. Contains: firecrawl-download (save entire websites as local files). +allowed-tools: + - Bash(firecrawl *) + - Bash(npx firecrawl *) +--- + +# firecrawl experimental + +Experimental commands that are in preview and may change. Currently includes: + +- **[firecrawl-download](firecrawl-download/SKILL.md)** — download an entire website as local files (markdown, screenshots, multiple formats) diff --git a/skills/firecrawl-experimental/firecrawl-download/SKILL.md b/skills/firecrawl-experimental/firecrawl-download/SKILL.md new file mode 100644 index 0000000..569ab30 --- /dev/null +++ b/skills/firecrawl-experimental/firecrawl-download/SKILL.md @@ -0,0 +1,69 @@ +--- +name: firecrawl-download +description: | + Download an entire website as local files — markdown, screenshots, or multiple formats per page. 
Use this skill when the user wants to save a site locally, download documentation for offline use, bulk-save pages as files, or says "download the site", "save as local files", "offline copy", "download all the docs", or "save for reference". Combines site mapping and scraping into organized local directories. +allowed-tools: + - Bash(firecrawl *) + - Bash(npx firecrawl *) +--- + +# firecrawl download + +> **Experimental.** Convenience command that combines `map` + `scrape` to save an entire site as local files. + +Maps the site first to discover pages, then scrapes each one into nested directories under `.firecrawl/`. All scrape options work with download. Always pass `-y` to skip the confirmation prompt. + +## When to use + +- You want to save an entire site (or section) to local files +- You need offline access to documentation or content +- Bulk content extraction with organized file structure + +## Quick start + +```bash +# Interactive wizard (picks format, screenshots, paths for you) +firecrawl download https://docs.example.com + +# With screenshots +firecrawl download https://docs.example.com --screenshot --limit 20 -y + +# Multiple formats (each saved as its own file per page) +firecrawl download https://docs.example.com --format markdown,links --screenshot --limit 20 -y +# Creates per page: index.md + links.txt + screenshot.png + +# Filter to specific sections +firecrawl download https://docs.example.com --include-paths "/features,/sdks" + +# Skip translations +firecrawl download https://docs.example.com --exclude-paths "/zh,/ja,/fr,/es,/pt-BR" + +# Full combo +firecrawl download https://docs.example.com \ + --include-paths "/features,/sdks" \ + --exclude-paths "/zh,/ja" \ + --only-main-content \ + --screenshot \ + -y +``` + +## Download options + +| Option | Description | +| ------------------------- | -------------------------------------------------------- | +| `--limit ` | Max pages to download | +| `--search ` | Filter URLs by search query | +| 
`--include-paths ` | Only download matching paths | +| `--exclude-paths ` | Skip matching paths | +| `--allow-subdomains` | Include subdomain pages | +| `-y` | Skip confirmation prompt (always use in automated flows) | + +## Scrape options (all work with download) + +`-f `, `-H`, `-S`, `--screenshot`, `--full-page-screenshot`, `--only-main-content`, `--include-tags`, `--exclude-tags`, `--wait-for`, `--max-age`, `--country`, `--languages` + +## See also + +- [firecrawl-map](../../firecrawl-map/SKILL.md) — just discover URLs without downloading +- [firecrawl-scrape](../../firecrawl-scrape/SKILL.md) — scrape individual pages +- [firecrawl-crawl](../../firecrawl-crawl/SKILL.md) — bulk extract as JSON (not local files) diff --git a/skills/firecrawl-map/SKILL.md b/skills/firecrawl-map/SKILL.md new file mode 100644 index 0000000..f8047fc --- /dev/null +++ b/skills/firecrawl-map/SKILL.md @@ -0,0 +1,50 @@ +--- +name: firecrawl-map +description: | + Discover and list all URLs on a website, with optional search filtering. Use this skill when the user wants to find a specific page on a large site, list all URLs, see the site structure, find where something is on a domain, or says "map the site", "find the URL for", "what pages are on", or "list all pages". Essential when the user knows which site but not which exact page. +allowed-tools: + - Bash(firecrawl *) + - Bash(npx firecrawl *) +--- + +# firecrawl map + +Discover URLs on a site. Use `--search` to find a specific page within a large site. 
+ +## When to use + +- You need to find a specific subpage on a large site +- You want a list of all URLs on a site before scraping or crawling +- Step 3 in the [workflow escalation pattern](firecrawl-cli): search → scrape → **map** → crawl → browser + +## Quick start + +```bash +# Find a specific page on a large site +firecrawl map "" --search "authentication" -o .firecrawl/filtered.txt + +# Get all URLs +firecrawl map "" --limit 500 --json -o .firecrawl/urls.json +``` + +## Options + +| Option | Description | +| --------------------------------- | ---------------------------- | +| `--limit ` | Max number of URLs to return | +| `--search ` | Filter URLs by search query | +| `--sitemap ` | Sitemap handling strategy | +| `--include-subdomains` | Include subdomain URLs | +| `--json` | Output as JSON | +| `-o, --output ` | Output file path | + +## Tips + +- **Map + scrape is a common pattern**: use `map --search` to find the right URL, then `scrape` it. +- Example: `map https://docs.example.com --search "auth"` → found `/docs/api/authentication` → `scrape` that URL. + +## See also + +- [firecrawl-scrape](../firecrawl-scrape/SKILL.md) — scrape the URLs you discover +- [firecrawl-crawl](../firecrawl-crawl/SKILL.md) — bulk extract instead of map + scrape +- [firecrawl-download](../firecrawl-experimental/firecrawl-download/SKILL.md) — download entire site (uses map internally) diff --git a/skills/firecrawl-scrape/SKILL.md b/skills/firecrawl-scrape/SKILL.md new file mode 100644 index 0000000..a090f48 --- /dev/null +++ b/skills/firecrawl-scrape/SKILL.md @@ -0,0 +1,63 @@ +--- +name: firecrawl-scrape +description: | + Extract clean markdown from any URL, including JavaScript-rendered SPAs. Use this skill whenever the user provides a URL and wants its content, says "scrape", "grab", "fetch", "pull", "get the page", "extract from this URL", or "read this webpage". Handles JS-rendered pages, multiple concurrent URLs, and returns LLM-optimized markdown. 
Use this instead of WebFetch for any webpage content extraction. +allowed-tools: + - Bash(firecrawl *) + - Bash(npx firecrawl *) +--- + +# firecrawl scrape + +Scrape one or more URLs. Returns clean, LLM-optimized markdown. Multiple URLs are scraped concurrently. + +## When to use + +- You have a specific URL and want its content +- The page is static or JS-rendered (SPA) +- Step 2 in the [workflow escalation pattern](firecrawl-cli): search → **scrape** → map → crawl → browser + +## Quick start + +```bash +# Basic markdown extraction +firecrawl scrape "" -o .firecrawl/page.md + +# Main content only, no nav/footer +firecrawl scrape "" --only-main-content -o .firecrawl/page.md + +# Wait for JS to render, then scrape +firecrawl scrape "" --wait-for 3000 -o .firecrawl/page.md + +# Multiple URLs (each saved to .firecrawl/) +firecrawl scrape https://example.com https://example.com/blog https://example.com/docs + +# Get markdown and links together +firecrawl scrape "" --format markdown,links -o .firecrawl/page.json +``` + +## Options + +| Option | Description | +| ------------------------ | ---------------------------------------------------------------- | +| `-f, --format ` | Output formats: markdown, html, rawHtml, links, screenshot, json | +| `-H` | Include HTTP headers in output | +| `--only-main-content` | Strip nav, footer, sidebar — main content only | +| `--wait-for ` | Wait for JS rendering before scraping | +| `--include-tags ` | Only include these HTML tags | +| `--exclude-tags ` | Exclude these HTML tags | +| `-o, --output ` | Output file path | + +## Tips + +- **Try scrape before browser.** Scrape handles static pages and JS-rendered SPAs. Only escalate to browser when you need interaction (clicks, form fills, pagination). +- Multiple URLs are scraped concurrently — check `firecrawl --status` for your concurrency limit. +- Single format outputs raw content. Multiple formats (e.g., `--format markdown,links`) output JSON. 
+- Always quote URLs — shell interprets `?` and `&` as special characters. +- Naming convention: `.firecrawl/{site}-{path}.md` + +## See also + +- [firecrawl-search](../firecrawl-search/SKILL.md) — find pages when you don't have a URL +- [firecrawl-browser](../firecrawl-browser/SKILL.md) — when scrape can't get the content (interaction needed) +- [firecrawl-download](../firecrawl-download/SKILL.md) — bulk download an entire site to local files diff --git a/skills/firecrawl-search/SKILL.md b/skills/firecrawl-search/SKILL.md new file mode 100644 index 0000000..aec4232 --- /dev/null +++ b/skills/firecrawl-search/SKILL.md @@ -0,0 +1,59 @@ +--- +name: firecrawl-search +description: | + Web search with full page content extraction. Use this skill whenever the user asks to search the web, find articles, research a topic, look something up, find recent news, discover sources, or says "search for", "find me", "look up", "what are people saying about", or "find articles about". Returns real search results with optional full-page markdown — not just snippets. Provides capabilities beyond Claude's built-in WebSearch. +allowed-tools: + - Bash(firecrawl *) + - Bash(npx firecrawl *) +--- + +# firecrawl search + +Web search with optional content scraping. Returns search results as JSON, optionally with full page content. 
+
+## When to use
+
+- You don't have a specific URL yet
+- You need to find pages, answer questions, or discover sources
+- First step in the [workflow escalation pattern](firecrawl-cli): search → scrape → map → crawl → browser
+
+## Quick start
+
+```bash
+# Basic search
+firecrawl search "your query" -o .firecrawl/result.json --json
+
+# Search and scrape full page content from results
+firecrawl search "your query" --scrape -o .firecrawl/scraped.json --json
+
+# News from the past day
+firecrawl search "your query" --sources news --tbs qdr:d -o .firecrawl/news.json --json
+```
+
+## Options
+
+| Option                 | Description                                   |
+| ---------------------- | --------------------------------------------- |
+| `--limit <n>`          | Max number of results                         |
+| `--sources <types>`    | Source types to search                        |
+| `--categories <cats>`  | Filter by category                            |
+| `--tbs <filter>`       | Time-based search filter                      |
+| `--location`           | Location for search results                   |
+| `--country <code>`     | Country code for search                       |
+| `--scrape`             | Also scrape full page content for each result |
+| `--scrape-formats`     | Formats when scraping (default: markdown)     |
+| `-o, --output <file>`  | Output file path                              |
+| `--json`               | Output as JSON                                |
+
+## Tips
+
+- **`--scrape` fetches full content** — don't re-scrape URLs from search results. This saves credits and avoids redundant fetches.
+- Always write results to `.firecrawl/` with `-o` to avoid context window bloat.
+- Use `jq` to extract URLs or titles: `jq -r '.data.web[].url' .firecrawl/search.json` +- Naming convention: `.firecrawl/search-{query}.json` or `.firecrawl/search-{query}-scraped.json` + +## See also + +- [firecrawl-scrape](../firecrawl-scrape/SKILL.md) — scrape a specific URL +- [firecrawl-map](../firecrawl-map/SKILL.md) — discover URLs within a site +- [firecrawl-crawl](../firecrawl-crawl/SKILL.md) — bulk extract from a site diff --git a/src/__tests__/commands/init.test.ts b/src/__tests__/commands/init.test.ts new file mode 100644 index 0000000..2528cfb --- /dev/null +++ b/src/__tests__/commands/init.test.ts @@ -0,0 +1,46 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { execSync } from 'child_process'; +import { handleInitCommand } from '../../commands/init'; + +vi.mock('child_process', () => ({ + execSync: vi.fn(), +})); + +describe('handleInitCommand', () => { + beforeEach(() => { + vi.clearAllMocks(); + vi.spyOn(console, 'log').mockImplementation(() => {}); + vi.spyOn(console, 'error').mockImplementation(() => {}); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('installs skills globally across all detected agents in non-interactive mode', async () => { + await handleInitCommand({ + yes: true, + skipInstall: true, + skipAuth: true, + }); + + expect(execSync).toHaveBeenCalledWith( + 'npx -y skills add firecrawl/cli --full-depth --global --all --yes', + { stdio: 'inherit' } + ); + }); + + it('scopes non-interactive skills install to one agent when provided', async () => { + await handleInitCommand({ + yes: true, + skipInstall: true, + skipAuth: true, + agent: 'cursor', + }); + + expect(execSync).toHaveBeenCalledWith( + 'npx -y skills add firecrawl/cli --full-depth --global --yes --agent cursor', + { stdio: 'inherit' } + ); + }); +}); diff --git a/src/__tests__/commands/setup.test.ts b/src/__tests__/commands/setup.test.ts new file mode 100644 index 0000000..78723c4 --- /dev/null +++ 
b/src/__tests__/commands/setup.test.ts @@ -0,0 +1,35 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { execSync } from 'child_process'; +import { handleSetupCommand } from '../../commands/setup'; + +vi.mock('child_process', () => ({ + execSync: vi.fn(), +})); + +describe('handleSetupCommand', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('installs skills globally across all detected agents by default', async () => { + await handleSetupCommand('skills', {}); + + expect(execSync).toHaveBeenCalledWith( + 'npx -y skills add firecrawl/cli --full-depth --global --all', + { stdio: 'inherit' } + ); + }); + + it('installs skills globally for a specific agent without using --all', async () => { + await handleSetupCommand('skills', { agent: 'cursor' }); + + expect(execSync).toHaveBeenCalledWith( + 'npx -y skills add firecrawl/cli --full-depth --global --agent cursor', + { stdio: 'inherit' } + ); + }); +}); diff --git a/src/commands/browser.ts b/src/commands/browser.ts index 4d2eeb4..c433cde 100644 --- a/src/commands/browser.ts +++ b/src/commands/browser.ts @@ -109,7 +109,7 @@ export async function handleBrowserLaunch( const lines: string[] = []; lines.push(`Session ID: ${data.id}`); lines.push(`CDP URL: ${data.cdpUrl}`); - if (data.interactiveLiveViewUrl) { + if ((data as any).interactiveLiveViewUrl) { lines.push( `Interactive Live View URL (recommended): ${(data as any).interactiveLiveViewUrl}` ); diff --git a/src/commands/init.ts b/src/commands/init.ts index b8da21f..88eb493 100644 --- a/src/commands/init.ts +++ b/src/commands/init.ts @@ -8,6 +8,7 @@ import { execSync } from 'child_process'; import { isAuthenticated, browserLogin, interactiveLogin } from '../utils/auth'; import { saveCredentials } from '../utils/credentials'; import { updateConfig, getApiKey } from '../utils/config'; +import { buildSkillsInstallArgs } from './skills-install'; export interface 
InitOptions { global?: boolean; @@ -214,11 +215,12 @@ async function stepIntegrations(options: InitOptions): Promise { switch (integration) { case 'skills': { console.log(`\n Setting up skills...`); - const args = ['npx', '-y', 'skills', 'add', 'firecrawl/cli']; - if (options.all) args.push('--all'); - if (options.yes || options.all) args.push('--yes'); - if (options.global) args.push('--global'); - if (options.agent) args.push('--agent', options.agent); + const args = buildSkillsInstallArgs({ + agent: options.agent, + yes: options.yes || options.all, + global: true, + includeNpxYes: true, + }); try { execSync(args.join(' '), { stdio: 'inherit' }); console.log(` ${green}✓${reset} Skills installed`); @@ -591,7 +593,7 @@ async function runNonInteractive(options: InitOptions): Promise { console.log(`${stepLabel()} Authenticating with Firecrawl...`); try { let result: { apiKey: string; apiUrl?: string; teamName?: string }; - if (options.browser) { + if (options.browser || !options.apiKey) { result = await browserLogin(); } else { result = await interactiveLogin(); @@ -614,11 +616,12 @@ async function runNonInteractive(options: InitOptions): Promise { console.log( `${stepLabel()} Installing firecrawl skill for AI coding agents...` ); - const args = ['npx', '-y', 'skills', 'add', 'firecrawl/cli']; - if (options.all) args.push('--all'); - if (options.yes || options.all) args.push('--yes'); - if (options.global) args.push('--global'); - if (options.agent) args.push('--agent', options.agent); + const args = buildSkillsInstallArgs({ + agent: options.agent, + yes: true, + global: true, + includeNpxYes: true, + }); try { execSync(args.join(' '), { stdio: 'inherit' }); console.log(`${green}✓${reset} Skills installed\n`); diff --git a/src/commands/setup.ts b/src/commands/setup.ts index 3248406..a9c0379 100644 --- a/src/commands/setup.ts +++ b/src/commands/setup.ts @@ -5,6 +5,7 @@ import { execSync } from 'child_process'; import { getApiKey } from '../utils/config'; +import { 
buildSkillsInstallArgs } from './skills-install';
 
 export type SetupSubcommand = 'skills' | 'mcp';
 
@@ -39,15 +40,11 @@ export async function handleSetupCommand(
   }
 }
 
 async function installSkills(options: SetupOptions): Promise<void> {
-  const args = ['npx', 'skills', 'add', 'firecrawl/cli'];
-
-  if (options.global) {
-    args.push('--global');
-  }
-
-  if (options.agent) {
-    args.push('--agent', options.agent);
-  }
+  const args = buildSkillsInstallArgs({
+    agent: options.agent,
+    global: true,
+    includeNpxYes: true,
+  });
 
   const cmd = args.join(' ');
   console.log(`Running: ${cmd}\n`);
diff --git a/src/commands/skills-install.ts b/src/commands/skills-install.ts
new file mode 100644
index 0000000..b44a0eb
--- /dev/null
+++ b/src/commands/skills-install.ts
@@ -0,0 +1,38 @@
+export interface SkillsInstallCommandOptions {
+  agent?: string;
+  all?: boolean;
+  yes?: boolean;
+  global?: boolean;
+  includeNpxYes?: boolean;
+}
+
+export function buildSkillsInstallArgs(
+  options: SkillsInstallCommandOptions = {}
+): string[] {
+  const args = ['npx'];
+
+  if (options.includeNpxYes) {
+    args.push('-y');
+  }
+
+  args.push('skills', 'add', 'firecrawl/cli', '--full-depth');
+
+  if (options.global ?? true) {
+    args.push('--global');
+  }
+
+  const installToAllAgents = options.agent ? false : (options.all ?? true);
+  if (installToAllAgents) {
+    args.push('--all');
+  }
+
+  if (options.yes) {
+    args.push('--yes');
+  }
+
+  if (options.agent) {
+    args.push('--agent', options.agent);
+  }
+
+  return args;
+}
diff --git a/src/index.ts b/src/index.ts
index 0494592..2ceb242 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1113,9 +1113,15 @@ program
     '[template]',
     'Template to scaffold (e.g.
browser-nextjs, scrape-express)'
   )
-  .option('--all', 'Install skills to all detected agents (implies --yes)')
-  .option('-y, --yes', 'Skip confirmation prompts for skills installation')
-  .option('-g, --global', 'Install skills globally (user-level)')
+  .option(
+    '--all',
+    'Explicitly install skills to all detected agents (default unless --agent is used)'
+  )
+  .option(
+    '-y, --yes',
+    'Run init non-interactively; skills still install globally across all detected agents unless --agent is used'
+  )
+  .option('-g, --global', 'Install skills globally (user-level, default)')
   .option('-a, --agent <name>', 'Install skills to a specific agent')
   .option(
     '-k, --api-key <key>',
@@ -1235,6 +1241,16 @@ async function main() {
     return;
   }
 
+  // Shorthand: `firecrawl -y` → `firecrawl init -y --all --browser`
+  if (
+    args.length >= 1 &&
+    (args[0] === '-y' || args[0] === '--yes') &&
+    args.length <= 1
+  ) {
+    await handleInitCommand({ yes: true, all: true, browser: true });
+    return;
+  }
+
   // Check if first argument is a template name
   if (!args[0].startsWith('-') && findTemplate(args[0])) {
     await scaffoldTemplate(args[0]);