';
- for(var i=0; i<8; i++){
+ for(var i=0; i<7; i++){
- html += '' + sorted_list[k].server.ping + '';
- break;
- }
-
- if(sorted_list[k].server.ping <= 200){
-
- html += '>' + sorted_list[k].server.ping + '';
- break;
- }
-
- html += '>' + number_format(sorted_list[k].server.ping) + '';
- break;
-
// server name
- case 1: html += ' class="extend">' + htmlspecialchars(sorted_list[k].server.name); break;
- case 2: html += '>' + htmlspecialchars(new URL(sorted_list[k].server.ip).host); break;
- case 3: // bot protection
+ case 0: html += ' class="extend">' + htmlspecialchars(sorted_list[k].server.name); break;
+ case 1: html += '>' + htmlspecialchars(new URL(sorted_list[k].server.ip).host); break;
+ case 2: // bot protection
switch(sorted_list[k].server.bot_protection){
case 0:
@@ -407,15 +374,15 @@ function render_list(){
}
break;
- case 4: // real reqs
+ case 3: // real reqs
html += '>' + number_format(sorted_list[k].server.real_requests);
break;
- case 5: // bot reqs
+ case 4: // bot reqs
html += '>' + number_format(sorted_list[k].server.bot_requests);
break;
- case 6: // api enabled
+ case 5: // api enabled
if(sorted_list[k].server.api_enabled){
@@ -427,7 +394,7 @@ function render_list(){
break;
// version
- case 7: html += ">v" + sorted_list[k].server.version; break;
+ case 6: html += ">v" + sorted_list[k].server.version; break;
}
html += '</td>';
@@ -436,6 +403,8 @@ function render_list(){
html += '</tr>';
}
+ console.log(html);
+
tbody.innerHTML = html;
}
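The hunks above drop the ping column, so the cell loop shrinks from 8 to 7 iterations and every surviving case index shifts down by one. A minimal sketch of the resulting column order, with names taken from the comments in the diff:

// column order after this patch; indices 0-6 match the new
// for(var i=0; i<7; i++) loop (the old ping case is gone)
var columns = [
    'server name',    // case 0
    'host',           // case 1
    'bot protection', // case 2
    'real reqs',      // case 3
    'bot reqs',       // case 4
    'api enabled',    // case 5
    'version'         // case 6
];
console.log(columns.length); // 7, the new loop bound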
diff --git a/template/about.html b/template/about.html
index 12dd957..6398884 100644
--- a/template/about.html
+++ b/template/about.html
@@ -38,7 +38,7 @@ This is a metasearch engine that gets results from other engines, and strips awa
Provide users with a privacy-oriented, extremely lightweight, ad-free, free as in freedom (and free beer!) way to search for documents around the internet, with minimal, optional javascript code. My long-term goal would be to build my own index (that doesn't suck) and provide users with an unbiased search engine, with no political inclinations.
Do you keep logs?
-I store data temporarly to get the next page of results. This might include search queries, tokens and other parameters. These parameters are encrypted using aes-256-gcm on the serber, for which I give you a key (also known internally as npt token). When you make a request to get the next page, you supply the token, the data is decrypted and the request is fulfilled. This encrypted data is deleted after 15 minutes, or after it's used, whichever comes first.
+I store data temporarily to get the next page of results. This might include search queries, filters, and tokens. These parameters are encrypted using libsodium on the server, for which I give you a decryption key (also known internally as the npt token). When you make a request to get the next page, you supply the token, the data is decrypted, and the request is fulfilled. This encrypted data is deleted after 15 minutes, or after it's used, whichever comes first.
I don't log IP addresses, user agents, or anything else. The npt tokens are the only things that are stored (in RAM, mind you), temporarily, encrypted.
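A minimal sketch of that npt flow, assuming libsodium's secretbox primitives in javascript (names and storage details here are illustrative, not 4get's actual implementation):

const sodium = require('libsodium-wrappers');

// hypothetical sketch of the npt token flow described above
async function makeNptToken(params){
    await sodium.ready;
    const key = sodium.crypto_secretbox_keygen(); // handed to the client as the npt token
    const nonce = sodium.randombytes_buf(sodium.crypto_secretbox_NONCEBYTES);
    const box = sodium.crypto_secretbox_easy(JSON.stringify(params), nonce, key);
    // the server keeps only {nonce, box} in RAM with a 15 minute TTL;
    // without the client's key it can't read the parameters back
    return {token: sodium.to_base64(key), nonce: nonce, box: box};
}

async function openNptToken(token, nonce, box){
    await sodium.ready;
    // the client supplies the token; decrypt once, then drop the entry
    const plain = sodium.crypto_secretbox_open_easy(box, nonce, sodium.from_base64(token));
    return JSON.parse(sodium.to_string(plain));
}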
@@ -48,7 +48,7 @@ Your search queries and supplied filters are shared with the scraper you chose (
TL;DR assume those websites can see what you search for, but can't see who you are (unless you're really dumb).
Where is this website hosted?
-This website is hosted on a Contabo shitbox in the United States.
+Please head over to the 4get instances page, select an instance and click on "IP lookup".
Keyboard shortcuts?
Use / to focus the search box.
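A sketch of how such a shortcut is usually wired up, assuming a hypothetical searchbox id (the real page's markup may differ):

// minimal sketch: focus the search box when "/" is pressed
// outside of a text field ("searchbox" is an assumed id)
document.addEventListener('keydown', function(e){
    var active = document.activeElement.tagName;
    if(e.key !== '/' || active === 'INPUT' || active === 'TEXTAREA'){
        return;
    }
    var box = document.getElementById('searchbox');
    if(box){
        e.preventDefault(); // don't type the "/" into the box
        box.focus();
    }
});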