Web Scraper
Fetch any URL and extract structured data including title, meta description, keywords, favicon, social links, all links on the page, emails, phone numbers, images, body text, and detected languages. Returns the site's HTTP status code separately so you can distinguish site errors (404, 500) from infrastructure errors. Use response_type=markdown to get only the page content as clean markdown.
Endpoint
/api/web-scraper.scrape
Parameters
boolean
Optional
Set to true to include the raw HTML in the response. Default: false. Ignored when response_type=markdown.
response_type
string
Optional
Set to "markdown" to return only the page content as clean markdown text; the response body is then markdown rather than JSON. Default: json.
Request Examples
PHP (cURL)
<?php
$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, 'https://scrappa.co/api/web-scraper.scrape?url=https%3A%2F%2Fexample.com');
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_HTTPHEADER, [
    'x-api-key: YOUR_API_KEY_HERE',
]);
$response = curl_exec($ch);
curl_close($ch);
echo $response;
PHP (Laravel HTTP client)
<?php
use Illuminate\Support\Facades\Http;

$response = Http::withHeaders([
    'x-api-key' => 'YOUR_API_KEY_HERE',
])->get('https://scrappa.co/api/web-scraper.scrape?url=https%3A%2F%2Fexample.com');

return $response->json();
JavaScript (fetch)
fetch('https://scrappa.co/api/web-scraper.scrape?url=https%3A%2F%2Fexample.com', {
  headers: {
    'x-api-key': 'YOUR_API_KEY_HERE'
  }
})
  .then(response => response.json())
  .then(data => console.log(data))
  .catch(error => console.error('Error:', error));
Node.js (axios)
const axios = require('axios');

axios.get('https://scrappa.co/api/web-scraper.scrape?url=https%3A%2F%2Fexample.com', {
  headers: {
    'x-api-key': 'YOUR_API_KEY_HERE'
  }
})
  .then(response => console.log(response.data))
  .catch(error => console.error('Error:', error));
Python (requests)
import requests

headers = {
    'x-api-key': 'YOUR_API_KEY_HERE'
}
response = requests.get('https://scrappa.co/api/web-scraper.scrape?url=https%3A%2F%2Fexample.com', headers=headers)
print(response.json())
Ruby (Net::HTTP)
require 'net/http'
require 'uri'
require 'json'

uri = URI.parse('https://scrappa.co/api/web-scraper.scrape?url=https%3A%2F%2Fexample.com')
request = Net::HTTP::Get.new(uri)
request['x-api-key'] = 'YOUR_API_KEY_HERE'

response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme == 'https') do |http|
  http.request(request)
end

puts JSON.parse(response.body)
Go (net/http)
package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    client := &http.Client{}
    req, _ := http.NewRequest("GET", "https://scrappa.co/api/web-scraper.scrape?url=https%3A%2F%2Fexample.com", nil)
    req.Header.Set("x-api-key", "YOUR_API_KEY_HERE")

    resp, err := client.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Println(string(body))
}
cURL
curl -X GET 'https://scrappa.co/api/web-scraper.scrape?url=https%3A%2F%2Fexample.com' \
  -H 'x-api-key: YOUR_API_KEY_HERE'
Response Schema
{
  "success": true,
  "site_status_code": 200,
  "url": "https://example.com",
  "final_url": "https://www.example.com",
  "data": {
    "title": "Example Domain",
    "description": "This domain is for use in illustrative examples.",
    "keywords": [],
    "favicon": "https://example.com/favicon.ico",
    "social_links": {
      "linkedin": null,
      "twitter": null,
      "facebook": null,
      "instagram": null,
      "youtube": null,
      "tiktok": null
    },
    "extracted_keywords": [
      "domain",
      "example",
      "illustrative"
    ],
    "links": [
      "https://www.iana.org/domains/example"
    ],
    "emails": [],
    "phone_numbers": [],
    "images": [],
    "body_text": "Example Domain This domain is for use in illustrative examples in documents.",
    "languages_detected": [
      "en"
    ],
    "html": null
  }
}
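As a sketch of consuming this response (assuming the Python requests client; field names follow the schema above), the site_status_code field can be checked to separate target-site errors from failures of the API call itself before reading the extracted data:

import requests

# Sketch only: YOUR_API_KEY_HERE is a placeholder for a real API key.
response = requests.get(
    'https://scrappa.co/api/web-scraper.scrape',
    params={'url': 'https://example.com'},
    headers={'x-api-key': 'YOUR_API_KEY_HERE'},
)

if not response.ok:
    # The API call itself failed (authentication, rate limiting, network, ...).
    raise RuntimeError(f'API error: {response.status_code}')

payload = response.json()
if payload['site_status_code'] >= 400:
    # The scrape ran, but the target site returned an error page (e.g. 404 or 500).
    print('Site error:', payload['site_status_code'])
else:
    data = payload['data']
    print('Title:', data['title'])
    print('Links found:', len(data['links']))
    print('Emails:', data['emails'])
    print('Languages:', data['languages_detected'])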