Documentation Index Fetch the complete documentation index at: https://firecrawl-mog-search-exclude-include-domains.mintlify.app/llms.txt
Use this file to discover all available pages before exploring further.
Firecrawl se integra con Go mediante la API REST. Usa net/http para realizar solicitudes directamente.
package main
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)
// main queries the Firecrawl /v2/search endpoint and prints the raw
// JSON response to stdout. The API key is read from the
// FIRECRAWL_API_KEY environment variable.
func main() {
	apiKey := os.Getenv("FIRECRAWL_API_KEY")

	body, err := json.Marshal(map[string]interface{}{
		"query": "firecrawl web scraping",
		"limit": 5,
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "marshal failed: %v\n", err)
		os.Exit(1)
	}

	req, err := http.NewRequest("POST", "https://api.firecrawl.dev/v2/search", bytes.NewReader(body))
	if err != nil {
		fmt.Fprintf(os.Stderr, "request failed: %v\n", err)
		os.Exit(1)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")

	// Use a client with a timeout instead of http.DefaultClient so a
	// stalled connection cannot hang the program forever.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Fprintf(os.Stderr, "solicitud fallida: %v\n", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	result, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Fprintf(os.Stderr, "read failed: %v\n", err)
		os.Exit(1)
	}
	fmt.Println(string(result))
}
{
"success" : true ,
"data" : {
"web" : [
{
"url" : "https://docs.firecrawl.dev" ,
"title" : "Firecrawl Documentation" ,
"markdown" : "# Firecrawl \n\n Firecrawl is a web scraping API..."
}
]
}
}
Hacer scraping de una página
// Build the scrape request: the target URL is the only required field.
body, err := json.Marshal(map[string]string{
	"url": "https://example.com",
})
if err != nil {
	fmt.Fprintf(os.Stderr, "marshal failed: %v\n", err)
	os.Exit(1)
}
req, err := http.NewRequest("POST", "https://api.firecrawl.dev/v2/scrape", bytes.NewReader(body))
if err != nil {
	fmt.Fprintf(os.Stderr, "request failed: %v\n", err)
	os.Exit(1)
}
req.Header.Set("Authorization", "Bearer "+apiKey)
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
	fmt.Fprintf(os.Stderr, "request failed: %v\n", err)
	os.Exit(1)
}
defer resp.Body.Close()
// Print the raw JSON response.
result, err := io.ReadAll(resp.Body)
if err != nil {
	fmt.Fprintf(os.Stderr, "read failed: %v\n", err)
	os.Exit(1)
}
fmt.Println(string(result))
{
"success" : true ,
"data" : {
"markdown" : "# Example Domain \n\n This domain is for use in illustrative examples..." ,
"metadata" : {
"title" : "Example Domain" ,
"sourceURL" : "https://example.com"
}
}
}
Interactúa con una página
Inicia una sesión del navegador, interactúa con la página utilizando prompts en lenguaje natural y luego cierra la sesión.
Paso 1 — Haz scraping para iniciar una sesión
// Step 1 — scrape once to open a browser session; the response
// metadata carries the scrapeId used by the follow-up interactions.
body, err := json.Marshal(map[string]interface{}{
	"url":     "https://www.amazon.com",
	"formats": []string{"markdown"},
})
if err != nil {
	fmt.Fprintf(os.Stderr, "marshal failed: %v\n", err)
	os.Exit(1)
}
req, err := http.NewRequest("POST", "https://api.firecrawl.dev/v2/scrape", bytes.NewReader(body))
if err != nil {
	fmt.Fprintf(os.Stderr, "request failed: %v\n", err)
	os.Exit(1)
}
req.Header.Set("Authorization", "Bearer "+apiKey)
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
	fmt.Fprintf(os.Stderr, "request failed: %v\n", err)
	os.Exit(1)
}
defer resp.Body.Close()
var scrapeResult map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&scrapeResult); err != nil {
	fmt.Fprintf(os.Stderr, "decode failed: %v\n", err)
	os.Exit(1)
}
// Checked type assertions: a malformed response exits cleanly instead
// of panicking.
data, ok := scrapeResult["data"].(map[string]interface{})
if !ok {
	fmt.Fprintln(os.Stderr, "unexpected response: missing data object")
	os.Exit(1)
}
metadata, ok := data["metadata"].(map[string]interface{})
if !ok {
	fmt.Fprintln(os.Stderr, "unexpected response: missing metadata object")
	os.Exit(1)
}
scrapeId, ok := metadata["scrapeId"].(string)
if !ok {
	fmt.Fprintln(os.Stderr, "unexpected response: missing scrapeId")
	os.Exit(1)
}
fmt.Println("scrapeId:", scrapeId)
Paso 2 — Enviar interacciones
// Search for a product.
interactBody, err := json.Marshal(map[string]string{
	"prompt": "Search for iPhone 16 Pro Max",
})
if err != nil {
	fmt.Fprintf(os.Stderr, "marshal failed: %v\n", err)
	os.Exit(1)
}
// Interactions are POSTed to the session-scoped /interact endpoint.
interactURL := fmt.Sprintf("https://api.firecrawl.dev/v2/scrape/%s/interact", scrapeId)
req, err = http.NewRequest("POST", interactURL, bytes.NewReader(interactBody))
if err != nil {
	fmt.Fprintf(os.Stderr, "request failed: %v\n", err)
	os.Exit(1)
}
req.Header.Set("Authorization", "Bearer "+apiKey)
req.Header.Set("Content-Type", "application/json")
resp, err = http.DefaultClient.Do(req)
if err != nil {
	fmt.Fprintf(os.Stderr, "interact failed: %v\n", err)
	os.Exit(1)
}
defer resp.Body.Close()
result, err := io.ReadAll(resp.Body)
if err != nil {
	fmt.Fprintf(os.Stderr, "read failed: %v\n", err)
	os.Exit(1)
}
fmt.Println(string(result))

// Click on the first result.
interactBody, err = json.Marshal(map[string]string{
	"prompt": "Click on the first result and tell me the price",
})
if err != nil {
	fmt.Fprintf(os.Stderr, "marshal failed: %v\n", err)
	os.Exit(1)
}
req, err = http.NewRequest("POST", interactURL, bytes.NewReader(interactBody))
if err != nil {
	fmt.Fprintf(os.Stderr, "request failed: %v\n", err)
	os.Exit(1)
}
req.Header.Set("Authorization", "Bearer "+apiKey)
req.Header.Set("Content-Type", "application/json")
resp, err = http.DefaultClient.Do(req)
if err != nil {
	fmt.Fprintf(os.Stderr, "interact failed: %v\n", err)
	os.Exit(1)
}
defer resp.Body.Close()
result, err = io.ReadAll(resp.Body)
if err != nil {
	fmt.Fprintf(os.Stderr, "read failed: %v\n", err)
	os.Exit(1)
}
fmt.Println(string(result))
Paso 3 — Detener la sesión
// Step 3 — a DELETE on the interact URL closes the browser session.
req, err = http.NewRequest("DELETE", interactURL, nil)
if err != nil {
	fmt.Fprintf(os.Stderr, "request failed: %v\n", err)
	os.Exit(1)
}
req.Header.Set("Authorization", "Bearer "+apiKey)
resp, err = http.DefaultClient.Do(req)
if err != nil {
	fmt.Fprintf(os.Stderr, "error al eliminar: %v\n", err)
	os.Exit(1)
}
defer resp.Body.Close()
// Confirm the API actually accepted the deletion before reporting success.
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
	fmt.Fprintf(os.Stderr, "unexpected status: %s\n", resp.Status)
	os.Exit(1)
}
fmt.Println("Sesión detenida")
Para reutilizar estas llamadas a la API en varios lugares, encapsúlalas en un pequeño cliente auxiliar:
// FirecrawlClient is a minimal reusable wrapper around the Firecrawl
// v2 REST API.
type FirecrawlClient struct {
	APIKey  string       // bearer token sent in the Authorization header
	BaseURL string       // API root, e.g. "https://api.firecrawl.dev/v2"
	Client  *http.Client // HTTP client used for every request
}

// NewFirecrawlClient returns a ready-to-use client authenticated with
// apiKey against the public Firecrawl v2 endpoint.
func NewFirecrawlClient(apiKey string) *FirecrawlClient {
	return &FirecrawlClient{
		APIKey:  apiKey,
		BaseURL: "https://api.firecrawl.dev/v2",
		// http.Client has no timeout by default; set one so a stalled
		// connection cannot block callers indefinitely.
		Client: &http.Client{Timeout: 30 * time.Second},
	}
}

// post marshals payload to JSON, POSTs it to BaseURL+endpoint with the
// client's credentials, and returns the raw response body.
func (fc *FirecrawlClient) post(endpoint string, payload interface{}) ([]byte, error) {
	body, err := json.Marshal(payload)
	if err != nil {
		return nil, fmt.Errorf("encoding payload for %s: %w", endpoint, err)
	}
	req, err := http.NewRequest("POST", fc.BaseURL+endpoint, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("building request for %s: %w", endpoint, err)
	}
	req.Header.Set("Authorization", "Bearer "+fc.APIKey)
	req.Header.Set("Content-Type", "application/json")
	resp, err := fc.Client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST %s: %w", endpoint, err)
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

// Scrape fetches a single page and returns the raw JSON response.
func (fc *FirecrawlClient) Scrape(url string) ([]byte, error) {
	return fc.post("/scrape", map[string]string{"url": url})
}

// Search runs a web search capped at limit results and returns the raw
// JSON response.
func (fc *FirecrawlClient) Search(query string, limit int) ([]byte, error) {
	return fc.post("/search", map[string]interface{}{"query": query, "limit": limit})
}
Documentación de búsqueda Busca en la web y obtén el contenido completo de la página
Documentación de scraping Todas las opciones de scraping, incluidos formatos, acciones y proxies
Documentación de interacción Haz clic, completa formularios y extrae contenido dinámico
Referencia de la API Documentación completa de la API REST