Lots of little changes, working on integration

Farhan Khan 2021-03-17 23:14:24 +00:00
parent 1fb2577dcb
commit 26f56b2367
7 changed files with 127 additions and 28 deletions

docker-compose.yml (new file, +52)

@@ -0,0 +1,52 @@
version: "3.9"
services:
  fedilogue:
    build: fedilogue/
    depends_on:
      - db
      - vpn
    cap_add:
      - NET_ADMIN
    ports:
      - "127.0.0.1:5555:5555"
  restapi:
    build: restapi/
    ports:
      - "127.0.0.1:6431:6432"
    depends_on:
      - db
  frontend:
    build: frontend/
    depends_on:
      - restapi
      - db
    ports:
      - "127.0.0.1:8080:8080"
  db:
    image: postgres:alpine
    restart: always
    environment:
      POSTGRES_USER: fedilogue
      POSTGRES_PASSWORD: fedilogue
      POSTGRES_DB: fedilogue
    volumes:
      - ./postgres-data:/var/lib/postgresql
  vpn:
    image: bubuntux/nordvpn
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    sysctls:
      - net.ipv4.conf.all.rp_filter=2
    devices:
      - /dev/net/tun
    environment:
      - USER=${NORDVPNUSER}
      - "PASS=${NORDVPNPASS}"
      - CONNECT=United_States
      - TECHNOLOGY=NordLynx
    ulimits:
      memlock:
        soft: -1
        hard: -1
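
The vpn service reads its NordVPN credentials from the host environment via the ${NORDVPNUSER} and ${NORDVPNPASS} substitutions above. A minimal way to bring the stack up, assuming the credentials are exported first (or placed in a .env file next to docker-compose.yml, which docker-compose also reads):

    # hypothetical credential values; docker-compose substitutes them into the vpn service
    export NORDVPNUSER='user@example.com'
    export NORDVPNPASS='secret'
    docker-compose up --build -d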

fedilogue/entrypoint.sh (new executable file, +9)

@@ -0,0 +1,9 @@
#!/bin/sh
sleep 15
route add default gw vpn
/usr/bin/psql -h db fedilogue fedilogue < tables.sql
cat tables.sql
echo /usr/bin/psql -h db fedilogue fedilogue
./fedilogue
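
The fixed sleep 15 is a crude wait for the db and vpn containers to come up before the script sets the default route and applies the schema. A sketch of a sturdier wait, assuming the image's postgres client tools include pg_isready (this is an alternative, not what the commit does):

    # poll until Postgres on the db host accepts connections
    until pg_isready -h db -U fedilogue; do sleep 1; done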

@@ -28,6 +28,7 @@ func statusReport() {
 	mastodon := 0
 	pleroma := 0
 	other := 0
+	ri_mutex.Lock()
 	for _, o := range runninginstances {
 		if o.Status == 200 {
 			running = running + 1
@@ -45,6 +46,7 @@ func statusReport() {
 			other = other + 1
 		}
 	}
+	ri_mutex.Unlock()
 	logInfo("Running:",running," Keepalive:", keepalive, " Unsupported:", unsupported, " M:", mastodon, ",P:",pleroma,",O:",other)
 	time.Sleep(time.Second*60)
 }
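
The new Lock/Unlock pair keeps the status reporter from iterating runninginstances while another goroutine mutates it. Go's race detector flags exactly this kind of unsynchronized map access; one way to check, assuming the service builds as a normal module:

    # build an instrumented binary, then run it and watch stderr for race reports
    go build -race .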

tables.sql
@@ -1,15 +1,11 @@
-DROP TABLE IF EXISTS activities CASCADE;
-DROP TABLE IF EXISTS actors CASCADE;
-DROP TABLE IF EXISTS instances CASCADE;
-CREATE TABLE actors (
+CREATE TABLE IF NOT EXISTS actors (
     id SERIAL PRIMARY KEY,
     document JSONB,
     identifiedat TIMESTAMP with time zone DEFAULT now(),
     instance VARCHAR(1000) NOT NULL
 );
-CREATE TABLE activities (
+CREATE TABLE IF NOT EXISTS activities (
     id SERIAL PRIMARY KEY,
     document JSONB,
     normalized TEXT,
@@ -18,7 +14,7 @@ CREATE TABLE activities (
 );
-CREATE TABLE instances (
+CREATE TABLE IF NOT EXISTS instances (
     endpoint VARCHAR(2083) NOT NULL PRIMARY KEY UNIQUE,
     autostart BOOLEAN,
     state VARCHAR(16),
@@ -35,14 +31,14 @@ ALTER TABLE activities
     GENERATED ALWAYS AS (to_tsvector('english', normalized)) STORED;
-CREATE UNIQUE INDEX actors_uri_idx ON actors ( (document->>'id') );
-CREATE UNIQUE INDEX activities_uri_idx ON activities ( (document->>'id') );
-CREATE INDEX activities_published_idx ON activities ( (document->>'published') );
-CREATE INDEX activities_identifiedat_idx ON activities (identifiedat);
-CREATE INDEX normalized_idx ON activities USING gin(normalized_tsvector);
-CREATE INDEX actors_id_idx ON actors (id);
-CREATE INDEX activities_id_idx ON activities (id);
+CREATE UNIQUE INDEX IF NOT EXISTS actors_uri_idx ON actors ( (document->>'id') );
+CREATE UNIQUE INDEX IF NOT EXISTS activities_uri_idx ON activities ( (document->>'id') );
+CREATE INDEX IF NOT EXISTS activities_published_idx ON activities ( (document->>'published') );
+CREATE INDEX IF NOT EXISTS activities_identifiedat_idx ON activities (identifiedat);
+CREATE INDEX IF NOT EXISTS normalized_idx ON activities USING gin(normalized_tsvector);
+CREATE INDEX IF NOT EXISTS actors_id_idx ON actors (id);
+CREATE INDEX IF NOT EXISTS activities_id_idx ON activities (id);
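
Replacing the DROP/CREATE pairs with IF NOT EXISTS guards makes the schema script idempotent, so the entrypoint can re-apply it on every container start without destroying existing rows:

    # safe to run repeatedly now; previously each run dropped the tables first
    /usr/bin/psql -h db fedilogue fedilogue < tables.sql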

restapi/Dockerfile (new file, +7)

@@ -0,0 +1,7 @@
FROM golang:alpine
COPY . /app
WORKDIR /app
RUN go build .
ENV DATABASE_URL "postgres://fedilogue:fedilogue@db/fedilogue"
ENV PGPASSWORD "fedilogue"
ENTRYPOINT ["./entrypoint.sh"]
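
The baked-in DATABASE_URL matches the db service in docker-compose.yml and is what the pool code below reads; it can be overridden per container without a rebuild, e.g. (image name is illustrative):

    docker run --rm -e DATABASE_URL='postgres://fedilogue:fedilogue@db/fedilogue' fedilogue_restapi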


@@ -1,19 +1,19 @@
 package main
 
 import (
+	"os"
 	"context"
-	"fmt"
 	"github.com/jackc/pgx/v4/pgxpool"
+	"log"
 )
 
 var pool *pgxpool.Pool
 
 func getDbPool() *pgxpool.Pool {
 	// Setup Database
-	dbURI := fmt.Sprintf("postgres://%s:%s@%s:%d/fedilogue", settings.Database.Username, settings.Database.Password, settings.Database.Host, settings.Database.Port)
-	pool, err := pgxpool.Connect(context.Background(), dbURI)
+	pool, err := pgxpool.Connect(context.Background(), os.Getenv("DATABASE_URL"))
 	if err != nil {
-		logFatal.Fatal("Unable to connect to database:", err)
+		log.Fatal("Unable to connect to database:", err)
 	}
 	return pool
 }
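
With the connection string now coming from os.Getenv rather than the settings file, running the API outside Docker only needs the variable set; a sketch, assuming go build . produced a binary named restapi:

    DATABASE_URL='postgres://fedilogue:fedilogue@localhost/fedilogue' ./restapi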


@@ -6,13 +6,19 @@ import (
"net/http" "net/http"
"strings" "strings"
"context" "context"
_ "strconv" "strconv"
"encoding/json" "encoding/json"
"github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4"
) )
func enableCors(w *http.ResponseWriter) {
(*w).Header().Set("Access-Control-Allow-Origin", "*")
}
func search(w http.ResponseWriter, r *http.Request) { func search(w http.ResponseWriter, r *http.Request) {
searchkeys, exists := r.URL.Query()["s"] enableCors(&w)
searchkeys, exists_search := r.URL.Query()["s"]
offsetkeys, exists_offset := r.URL.Query()["o"]
if strings.Contains(r.Header.Get("Accept"), "application/json") { if strings.Contains(r.Header.Get("Accept"), "application/json") {
log.Print("Treat as an Json") log.Print("Treat as an Json")
@@ -24,14 +30,28 @@ func search(w http.ResponseWriter, r *http.Request) {
 	var err error
 	var rows pgx.Rows
 	var searchKey string
-	if exists {
+	if exists_search {
 		searchKey = searchkeys[0]
 	}
+	var offsetKey int
+	if exists_offset {
+		offsetKey, _ = strconv.Atoi(offsetkeys[0])
+	} else {
+		offsetKey = -1
+	}
 
-	if exists && searchKey != "" {
-		rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' WHERE activities.normalized_tsvector @@ plainto_tsquery($1) ORDER BY activities.identifiedat DESC LIMIT 20", searchKey)
-	} else {
-		rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' ORDER BY activities.identifiedat DESC LIMIT 20")
-	}
+	if exists_search && searchKey != "" {
+		if offsetKey == -1 {
+			rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' WHERE activities.normalized_tsvector @@ plainto_tsquery($1) ORDER BY activities.identifiedat DESC LIMIT 10", searchKey)
+		} else {
+			rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' WHERE activities.normalized_tsvector @@ plainto_tsquery($1) ORDER BY activities.identifiedat DESC LIMIT 10 OFFSET $2", searchKey, offsetKey)
+		}
+	} else {
+		if offsetKey == -1 {
+			rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' ORDER BY activities.identifiedat DESC LIMIT 10")
+		} else {
+			rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' ORDER BY activities.identifiedat DESC LIMIT 10 OFFSET $1", offsetKey)
+		}
+	}
 
 	if err != nil {
@@ -39,7 +59,8 @@ func search(w http.ResponseWriter, r *http.Request) {
 	}
 	defer rows.Close()
 
+	var lastid int
+	lastid = 0
 	var activitiesJson []map[string]json.RawMessage
 	for rows.Next() {
 		var id int
@@ -56,12 +77,25 @@ func search(w http.ResponseWriter, r *http.Request) {
 		if err != nil {
 			fmt.Println(err)
 		}
+
+		if lastid == 0 {
+			lastid = id
+		} else if lastid > id {
+			lastid = id
+		}
+
 		activityJson["actor"] = json.RawMessage(actorRaw)
 		activitiesJson = append(activitiesJson, activityJson)
 	}
 
-	data, err := json.Marshal(activitiesJson)
+	requestData := make(map[string]int)
+	requestData["lastid"] = lastid
+	requestData["total_results"] = 9999
+
+	totalJson := make(map[string]interface{})
+	totalJson["requestdata"] = requestData
+	totalJson["activities"] = activitiesJson
+
+	data, err := json.Marshal(totalJson)
 	if err != nil {
 		log.Fatal("error marshaling combined activity: %v", err)
 	}
@@ -69,7 +103,6 @@ func search(w http.ResponseWriter, r *http.Request) {
 }
 
 func main() {
-	getSettings()
 	pool = getDbPool()
 	http.HandleFunc("/search", search)
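
Taken together, the handler now accepts an s search term and an o offset, allows cross-origin reads, and wraps results in an envelope keyed by requestdata and activities. A quick check against the compose mapping above (host port 6431 -> container port 6432):

    curl -H 'Accept: application/json' 'http://127.0.0.1:6431/search?s=fediverse&o=10'
    # expected shape (values illustrative; total_results is currently a hard-coded 9999):
    # {"requestdata":{"lastid":123,"total_results":9999},"activities":[...]}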