const Apify = require('apify');

const total_data = [];
// Two capitalized words followed by punctuation or whitespace, e.g. "John Doe".
const regex_name = /[A-Z][a-z]+\s[A-Z][a-z]+(?=\.|,|\s|!|\?)/gm;
// Text after a label ending in "stand:" (as on German imprint pages), possibly followed by a closing </strong> tag.
const regex_address = /stand:(<\/strong>)?\s+(\w+\s+\w+),?\s+(\w+\s+\w+)?/gm;
// Email pattern; the "g" flag is required so exec() advances through the page in the loop below.
const regex_email = /(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))/gi;

Apify.main(async () => {
    const requestQueue = await Apify.openRequestQueue('123');
    await requestQueue.addRequest(new Apify.Request({ url: 'https://www.freeletics.com/de/pages/imprint/' }));
    await requestQueue.addRequest(new Apify.Request({ url: 'https://di1ara.com/pages/impressum' }));
    console.log('\nStart PuppeteerCrawler\n');

    const crawler = new Apify.PuppeteerCrawler({
        requestQueue,
        handlePageFunction: async ({ request, page }) => {
            const title = await page.title();
            console.log(`Title of ${request.url}: ${title}`);
            const page_content = await page.content();
            console.log('Page content size:', page_content.length);

            const obj = { url: request.url };
            let m;

            console.log('Names:');
            while ((m = regex_name.exec(page_content)) !== null) {
                // This is necessary to avoid infinite loops with zero-width matches.
                if (m.index === regex_name.lastIndex) {
                    regex_name.lastIndex++;
                }
                m.forEach((match, groupIndex) => {
                    console.log(`Found match, group ${groupIndex}: ${match}`);
                    // Skip unmatched optional groups; append real matches to the list.
                    if (match !== undefined) {
                        obj.names = (obj.names || '') + match + ', ';
                    }
                });
            }

            console.log('\nAddress:');
            while ((m = regex_address.exec(page_content)) !== null) {
                if (m.index === regex_address.lastIndex) {
                    regex_address.lastIndex++;
                }
                m.forEach((match, groupIndex) => {
                    console.log(`Found match, group ${groupIndex}: ${match}`);
                });
                // Strip markup remnants so only the address text is stored.
                m[0] = m[0].includes('</strong>') ? m[0].split('</strong>')[1] : m[0];
                m[0] = m[0].replace('<', '');
                obj.address = m[0] ?? '';
            }

            console.log('\nEmail:');
            while ((m = regex_email.exec(page_content)) !== null) {
                if (m.index === regex_email.lastIndex) {
                    regex_email.lastIndex++;
                }
                m.forEach((match, groupIndex) => {
                    console.log(`Found match, group ${groupIndex}: ${match}`);
                });
                // The first email found on the page is enough.
                if (m[0]) {
                    obj.email = m[0];
                    break;
                }
            }

            total_data.push(obj);
            console.log(obj);
        },
        maxRequestsPerCrawl: 2000000,
        maxConcurrency: 20,
    });

    await crawler.run();
    console.log('Total data:');
    console.log(total_data);
});
Hoppscotch – API ecosystem
Add
Adding a constraint NetAppToken composed of 3 columns: network, application, token.
Note: you are supposed to have selected the right database first: use <database_name>;
ALTER TABLE crypto ADD CONSTRAINT NetAppToken UNIQUE (network, application, token);
View
SELECT table_schema, table_name, constraint_name FROM information_schema.table_constraints WHERE table_name = 'crypto';
Result
+--------------+------------+-----------------+
| table_schema | table_name | constraint_name |
+--------------+------------+-----------------+
| admin_crypto | crypto | PRIMARY |
| admin_crypto | crypto | NetAppToken |
+--------------+------------+-----------------+
2 rows in set (0.06 sec)
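Test
To see the constraint in action, try inserting the same (network, application, token) triple twice; the second insert should fail with a duplicate-key error. The values below are made up for illustration, and the sketch assumes the table's remaining columns are nullable or have defaults:
INSERT INTO crypto (network, application, token)
VALUES ('terra', 'terraswap', 'LUNA');   -- succeeds

INSERT INTO crypto (network, application, token)
VALUES ('terra', 'terraswap', 'LUNA');   -- rejected by NetAppToken with an error like:
-- ERROR 1062 (23000): Duplicate entry 'terra-terraswap-LUNA' for key 'NetAppToken'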
cURL request into PHP cURL code
Recently I needed to transform a cURL request into PHP cURL code; binary data and the compressed option were involved. Here is the query itself:
curl 'https://terraswap-graph.terra.dev/graphql' -H 'Accept-Encoding: gzip, deflate, br' -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'Connection: keep-alive' -H 'DNT: 1' -H 'Origin: https://terraswap-graph.terra.dev' --data-binary '{"query":"{\n pairs {\n pairAddress\n latestLiquidityUST\n token0 {\n tokenAddress\n symbol\n }\n token1 {\n tokenAddress\n symbol\n }\n commissionAPR\n volume24h {\n volumeUST\n }\n }\n}\n"}' --compressed
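A minimal sketch of the equivalent PHP cURL code might look as follows; the JSON body and headers are copied from the curl command above, CURLOPT_POSTFIELDS is the counterpart of --data-binary, and CURLOPT_ENCODING set to an empty string is the counterpart of --compressed:
<?php
// Single quotes keep the \n sequences literal inside the JSON body,
// exactly as they appear in the --data-binary payload above.
$query = '{"query":"{\n pairs {\n pairAddress\n latestLiquidityUST\n token0 {\n tokenAddress\n symbol\n }\n token1 {\n tokenAddress\n symbol\n }\n commissionAPR\n volume24h {\n volumeUST\n }\n }\n}\n"}';

$ch = curl_init('https://terraswap-graph.terra.dev/graphql');
curl_setopt_array($ch, [
    CURLOPT_RETURNTRANSFER => true,
    CURLOPT_POST           => true,
    CURLOPT_POSTFIELDS     => $query, // --data-binary
    CURLOPT_ENCODING       => '',     // --compressed: accept and decode gzip/deflate/br
    CURLOPT_HTTPHEADER     => [
        'Content-Type: application/json',
        'Accept: application/json',
        'Origin: https://terraswap-graph.terra.dev',
        'DNT: 1',
        'Connection: keep-alive',
    ],
]);
$response = curl_exec($ch);
curl_close($ch);
echo $response;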
Auth proxy with Java
In this post we'll show how to leverage an auth proxy (with login/password) for a Java application.
In this post I’ll share how I’ve added a LetsEncrypt SSL certificate to a subdomain at VPS with Centos 7 using Vesta CP.
This post walks through the steps to create a subdomain (CentOS 7 and Vesta CP) and map a Laravel project folder to it.

Recently we encountered a powerful new scraping service called Web Scraper IDE by Bright Data. A live test and a thorough drill-down are coming soon, but for now we want to highlight the main features that strongly impressed us.

- Remove the previous git origin:
git remote remove origin
- Add a new origin with a PAT (<TOKEN>):
git remote add origin https://<TOKEN>@github.com/<USERNAME>/<REPO>.git
- Push once with --set-upstream:
git push --set-upstream origin main
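To confirm the new origin is wired up correctly, list the remotes; the PAT appears as part of the URL:
git remote -v
origin  https://<TOKEN>@github.com/<USERNAME>/<REPO>.git (fetch)
origin  https://<TOKEN>@github.com/<USERNAME>/<REPO>.git (push)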
Now you can push changes to the remote repo without adding the PAT to the push command every time.
If you need to create a PAT, use the following tutorial.
Random Forest vs Gradient boosting
The objective of the task is to build a model so that we can, as optimally as this data allows, relate molecular information to an actual biological response.
We have shared the data in the comma-separated values (CSV) format. Each row in this data set represents a molecule. The first column contains experimental data describing an actual biological response: the molecule was seen to elicit this response (1) or not (0). The remaining columns represent molecular descriptors (D1 through D1776); these are calculated properties that can capture some of the characteristics of the molecule, for example size, shape, or elemental constitution. The descriptor matrix has been normalized.
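As a sketch of how such a comparison might be set up with scikit-learn; the file name bioresponse.csv, the hyperparameters, and the accuracy metric are illustrative assumptions, not part of the original task:
# A minimal sketch, assuming the CSV layout described above
# (first column = biological response 0/1, rest = descriptors D1..D1776).
import pandas as pd
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import cross_val_score

data = pd.read_csv('bioresponse.csv')  # hypothetical file name
X, y = data.iloc[:, 1:], data.iloc[:, 0]

models = {
    'Random Forest': RandomForestClassifier(n_estimators=300, random_state=42),
    'Gradient Boosting': GradientBoostingClassifier(n_estimators=300, random_state=42),
}
for name, model in models.items():
    # 5-fold cross-validated accuracy as a simple side-by-side metric.
    scores = cross_val_score(model, X, y, cv=5, scoring='accuracy')
    print(f'{name}: {scores.mean():.3f} +/- {scores.std():.3f}')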