Return Page Source
Scrape the webpage in headless mode, but receive the HTML without any JavaScript rendering applied.
If you want WebScrapingAPI to return the unaltered HTML file served by the server, without any JavaScript rendering, all you have to do is enable the return_page_source feature.
This feature is similar to a cURL request, but uses an actual browser to return the HTML while also evading detection. In order to use return_page_source, you also have to turn on JavaScript rendering (render_js=1).
Your full GET request should then be sent to the following address:
https://api.webscrapingapi.com/v1?api_key=<YOUR_API_KEY>&url=<TARGETED_URL>&render_js=1&return_page_source=1
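For a quick look at what the parameter changes, the sketch below (a minimal Python example using the requests library, with a placeholder API key and the same sample target URL used in the examples that follow) fetches the page twice: once with return_page_source=1 to get the raw HTML served by the server, and once without it to get the JavaScript-rendered DOM, then prints the size of each response.

import requests

API_KEY = '<YOUR_API_KEY>'  # placeholder, replace with your own key
SCRAPER_URL = 'https://api.webscrapingapi.com/v1'
TARGET_URL = 'https://raportariarr.web.app/'

# Raw page source: the HTML exactly as the server delivered it.
page_source = requests.get(SCRAPER_URL, params={
    'api_key': API_KEY,
    'url': TARGET_URL,
    'render_js': 1,
    'return_page_source': 1,
})

# Rendered DOM: the HTML after the headless browser has executed JavaScript.
rendered_dom = requests.get(SCRAPER_URL, params={
    'api_key': API_KEY,
    'url': TARGET_URL,
    'render_js': 1,
})

# The rendered DOM is typically larger, since JavaScript may inject extra markup.
print('page source length:', len(page_source.text))
print('rendered DOM length:', len(rendered_dom.text))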

Return Page Source Integration Examples

cURL
NodeJS
Python
PHP
Go
Java
.NET
Ruby
curl --request GET --url "https://api.webscrapingapi.com/v1?api_key=<YOUR_API_KEY>&url=https://raportariarr.web.app/&render_js=1&return_page_source=1"
const http = require("https");

const options = {
  "method": "GET",
  "hostname": "api.webscrapingapi.com",
  "port": null,
  "path": "/v1?api_key=<YOUR_API_KEY>&url=https://raportariarr.web.app/&render_js=1&return_page_source=1",
  "headers": {}
};

const req = http.request(options, function (res) {
  const chunks = [];

  res.on("data", function (chunk) {
    chunks.push(chunk);
  });

  res.on("end", function () {
    // The body is the unaltered HTML served by the target server.
    const body = Buffer.concat(chunks);
    console.log(body.toString());
  });
});

req.end();
import requests

API_KEY = '<YOUR_API_KEY>'
SCRAPER_URL = 'https://api.webscrapingapi.com/v1'
TARGET_URL = 'https://raportariarr.web.app/'

PARAMS = {
    "api_key": API_KEY,
    "url": TARGET_URL,
    "render_js": 1,
    "return_page_source": 1
}

response = requests.get(SCRAPER_URL, params=PARAMS)
print(response.text)
<?php

$curl = curl_init();

curl_setopt_array($curl, [
  CURLOPT_URL => "https://api.webscrapingapi.com/v1?api_key=<YOUR_API_KEY>&url=https://raportariarr.web.app/&render_js=1&return_page_source=1",
  CURLOPT_RETURNTRANSFER => true,
  CURLOPT_ENCODING => "",
  CURLOPT_MAXREDIRS => 10,
  CURLOPT_TIMEOUT => 30,
  CURLOPT_HTTP_VERSION => CURL_HTTP_VERSION_1_1,
  CURLOPT_CUSTOMREQUEST => "GET",
]);

$response = curl_exec($curl);
$err = curl_error($curl);

curl_close($curl);

if ($err) {
  echo "cURL Error #:" . $err;
} else {
  echo $response;
}
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	url := "https://api.webscrapingapi.com/v1?api_key=<YOUR_API_KEY>&url=https://raportariarr.web.app/&render_js=1&return_page_source=1"

	req, _ := http.NewRequest("GET", url, nil)
	res, _ := http.DefaultClient.Do(req)
	defer res.Body.Close()

	// The body is the unaltered HTML served by the target server.
	body, _ := io.ReadAll(res.Body)
	fmt.Println(string(body))
}
HttpResponse<String> response = Unirest.get("https://api.webscrapingapi.com/v1?api_key=<YOUR_API_KEY>&url=https://raportariarr.web.app/&render_js=1&return_page_source=1")
  .asString();
var client = new RestClient("https://api.webscrapingapi.com/v1?api_key=<YOUR_API_KEY>&url=https://raportariarr.web.app/&render_js=1&return_page_source=1");
var request = new RestRequest(Method.GET);
IRestResponse response = client.Execute(request);
require 'uri'
require 'net/http'
require 'openssl'
url = URI("https://api.webscrapingapi.com/v1?api_key=<YOUR_API_KEY>&url=https://raportariarr.web.app/&render_js=1&return_page_source=1")
http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_NONE
request = Net::HTTP::Get.new(url)
response = http.request(request)
puts response.read_body
Response Example