Lots of little changes, working on integration
parent 1fb2577dcb
commit 26f56b2367
docker-compose.yml (new file, 52 lines)
@@ -0,0 +1,52 @@
version: "3.9"

services:
  fedilogue:
    build: fedilogue/
    depends_on:
      - db
      - vpn
    cap_add:
      - NET_ADMIN
    ports:
      - "127.0.0.1:5555:5555"
  restapi:
    build: restapi/
    ports:
      - "127.0.0.1:6431:6432"
    depends_on:
      - db
  frontend:
    build: frontend/
    depends_on:
      - restapi
      - db
    ports:
      - "127.0.0.1:8080:8080"
  db:
    image: postgres:alpine
    restart: always
    environment:
      POSTGRES_USER: fedilogue
      POSTGRES_PASSWORD: fedilogue
      POSTGRES_DB: fedilogue
    volumes:
      - ./postgres-data:/var/lib/postgresql
  vpn:
    image: bubuntux/nordvpn
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    sysctls:
      - net.ipv4.conf.all.rp_filter=2
    devices:
      - /dev/net/tun
    environment:
      - USER=${NORDVPNUSER}
      - "PASS=${NORDVPNPASS}"
      - CONNECT=United_States
      - TECHNOLOGY=NordLynx
    ulimits:
      memlock:
        soft: -1
        hard: -1
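For local testing the whole stack can be brought up with standard Compose commands; a minimal sketch, assuming the shell (or an .env file next to docker-compose.yml) supplies the NordVPN credentials referenced by the vpn service:

export NORDVPNUSER=example-user   # placeholder value, not part of the commit
export NORDVPNPASS=example-pass   # placeholder value, not part of the commit

# build the fedilogue, restapi and frontend images and start everything in the background
docker-compose up --build -d

# all published ports are bound to localhost: fedilogue on 5555, restapi on 6431, frontend on 8080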
fedilogue/entrypoint.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/sh

sleep 15

route add default gw vpn
/usr/bin/psql -h db fedilogue fedilogue < tables.sql
cat tables.sql
echo /usr/bin/psql -h db fedilogue fedilogue
./fedilogue
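The script sleeps briefly to give Postgres time to start, routes default traffic through the vpn container, loads tables.sql, and then starts fedilogue. One way to confirm the schema load succeeded once the stack is running (a sketch; the db service name and the fedilogue role/database come from docker-compose.yml above):

# list tables and indexes inside the running db container
docker-compose exec db psql -U fedilogue -d fedilogue -c '\dt'
docker-compose exec db psql -U fedilogue -d fedilogue -c '\di'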
@@ -28,6 +28,7 @@ func statusReport() {
    mastodon := 0
    pleroma := 0
    other := 0
+   ri_mutex.Lock()
    for _, o := range runninginstances {
        if o.Status == 200 {
            running = running + 1

@@ -45,6 +46,7 @@ func statusReport() {
            other = other + 1
        }
    }
+   ri_mutex.Unlock()
    logInfo("Running:",running," Keepalive:", keepalive, " Unsupported:", unsupported, " M:", mastodon, ",P:",pleroma,",O:",other)
    time.Sleep(time.Second*60)
}
@@ -1,15 +1,11 @@
-DROP TABLE IF EXISTS activities CASCADE;
-DROP TABLE IF EXISTS actors CASCADE;
-DROP TABLE IF EXISTS instances CASCADE;
-
-CREATE TABLE actors (
+CREATE TABLE IF NOT EXISTS actors (
    id SERIAL PRIMARY KEY,
    document JSONB,
    identifiedat TIMESTAMP with time zone DEFAULT now(),
    instance VARCHAR(1000) NOT NULL
);

-CREATE TABLE activities (
+CREATE TABLE IF NOT EXISTS activities (
    id SERIAL PRIMARY KEY,
    document JSONB,
    normalized TEXT,

@@ -18,7 +14,7 @@ CREATE TABLE activities (
);


-CREATE TABLE instances (
+CREATE TABLE IF NOT EXISTS instances (
    endpoint VARCHAR(2083) NOT NULL PRIMARY KEY UNIQUE,
    autostart BOOLEAN,
    state VARCHAR(16),

@@ -35,14 +31,14 @@ ALTER TABLE activities
    GENERATED ALWAYS AS (to_tsvector('english', normalized)) STORED;


-CREATE UNIQUE INDEX actors_uri_idx ON actors ( (document->>'id') );
-CREATE UNIQUE INDEX activities_uri_idx ON activities ( (document->>'id') );
+CREATE UNIQUE INDEX IF NOT EXISTS actors_uri_idx ON actors ( (document->>'id') );
+CREATE UNIQUE INDEX IF NOT EXISTS activities_uri_idx ON activities ( (document->>'id') );

-CREATE INDEX activities_published_idx ON activities ( (document->>'published') );
-CREATE INDEX activities_identifiedat_idx ON activities (identifiedat);
+CREATE INDEX IF NOT EXISTS activities_published_idx ON activities ( (document->>'published') );
+CREATE INDEX IF NOT EXISTS activities_identifiedat_idx ON activities (identifiedat);

-CREATE INDEX normalized_idx ON activities USING gin(normalized_tsvector);
+CREATE INDEX IF NOT EXISTS normalized_idx ON activities USING gin(normalized_tsvector);

-CREATE INDEX actors_id_idx ON actors (id);
-CREATE INDEX activities_id_idx ON activities (id);
+CREATE INDEX IF NOT EXISTS actors_id_idx ON actors (id);
+CREATE INDEX IF NOT EXISTS activities_id_idx ON activities (id);
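With every statement now guarded by IF NOT EXISTS and the DROP TABLE lines gone, the schema file can be re-applied to an existing database without wiping data, which matches the entrypoint running it on every container start. A hedged example of re-applying it by hand through Compose (adjust the path to wherever tables.sql lives in the repository):

# -T disables the pseudo-TTY so the stdin redirect works; role and database come from the compose environment
docker-compose exec -T db psql -U fedilogue -d fedilogue < tables.sql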
restapi/Dockerfile (new file, 7 lines)
@@ -0,0 +1,7 @@
FROM golang:alpine
COPY . /app
WORKDIR /app
RUN go build .
ENV DATABASE_URL "postgres://fedilogue:fedilogue@db/fedilogue"
ENV PGPASSWORD "fedilogue"
ENTRYPOINT ["./entrypoint.sh"]
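The two ENV lines only bake defaults into the image; they can be overridden per container (for example via an environment: block in docker-compose.yml) without rebuilding. A small sketch for iterating on just this service:

# rebuild only the restapi image and restart that service
docker-compose build restapi
docker-compose up -d restapi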
@@ -1,19 +1,19 @@
package main

import (
    "os"
    "context"
    "fmt"
    "github.com/jackc/pgx/v4/pgxpool"
    "log"
)

var pool *pgxpool.Pool

func getDbPool() *pgxpool.Pool {
    // Setup Database
-   dbURI := fmt.Sprintf("postgres://%s:%s@%s:%d/fedilogue", settings.Database.Username, settings.Database.Password, settings.Database.Host, settings.Database.Port)
-   pool, err := pgxpool.Connect(context.Background(), dbURI)
+   pool, err := pgxpool.Connect(context.Background(), os.Getenv("DATABASE_URL"))
    if err != nil {
-       logFatal.Fatal("Unable to connect to database:", err)
+       log.Fatal("Unable to connect to database:", err)
    }
    return pool
}
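Since the pool is now configured entirely from DATABASE_URL, the REST API can also be run outside of Compose by exporting the same variable; a sketch, assuming a Postgres instance reachable on localhost:5432 (docker-compose.yml above does not publish the db port, so this presumes a separately exposed database):

# from the restapi/ directory
export DATABASE_URL="postgres://fedilogue:fedilogue@localhost:5432/fedilogue"
go run .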
@@ -6,13 +6,19 @@ import (
    "net/http"
    "strings"
    "context"
-   _ "strconv"
+   "strconv"
    "encoding/json"
    "github.com/jackc/pgx/v4"
)

+func enableCors(w *http.ResponseWriter) {
+   (*w).Header().Set("Access-Control-Allow-Origin", "*")
+}
+
func search(w http.ResponseWriter, r *http.Request) {
-   searchkeys, exists := r.URL.Query()["s"]
+   enableCors(&w)
+   searchkeys, exists_search := r.URL.Query()["s"]
+   offsetkeys, exists_offset := r.URL.Query()["o"]

    if strings.Contains(r.Header.Get("Accept"), "application/json") {
        log.Print("Treat as an Json")
@@ -24,14 +30,28 @@ func search(w http.ResponseWriter, r *http.Request) {
    var err error
    var rows pgx.Rows
    var searchKey string
-   if exists {
+   if exists_search {
        searchKey = searchkeys[0]
    }

-   if exists && searchKey != "" {
-       rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' WHERE activities.normalized_tsvector @@ plainto_tsquery($1) ORDER BY activities.identifiedat DESC LIMIT 20", searchKey)
+   var offsetKey int
+   if exists_offset {
+       offsetKey, _ = strconv.Atoi(offsetkeys[0])
    } else {
-       rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' ORDER BY activities.identifiedat DESC LIMIT 20")
+       offsetKey = -1
    }
+
+   if exists_search && searchKey != "" {
+       if offsetKey == -1 {
+           rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' WHERE activities.normalized_tsvector @@ plainto_tsquery($1) ORDER BY activities.identifiedat DESC LIMIT 10", searchKey)
+       } else {
+           rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' WHERE activities.normalized_tsvector @@ plainto_tsquery($1) ORDER BY activities.identifiedat DESC LIMIT 10 OFFSET $2", searchKey, offsetKey)
+       }
+   } else {
+       if offsetKey == -1 {
+           rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' ORDER BY activities.identifiedat DESC LIMIT 10")
+       } else {
+           rows, err = pool.Query(context.Background(), "SELECT activities.id, activities.document, actors.document FROM activities as activities INNER JOIN actors as actors ON activities.document->>'actor' = actors.document->>'id' ORDER BY activities.identifiedat DESC OFFSET $2 LIMIT 10 FETCH FIRST", offsetKey)
+       }
+   }

    if err != nil {
@@ -39,7 +59,8 @@ func search(w http.ResponseWriter, r *http.Request) {
    }
    defer rows.Close()

-
+   var lastid int
+   lastid = 0
    var activitiesJson []map[string]json.RawMessage
    for rows.Next() {
        var id int
@@ -56,12 +77,25 @@ func search(w http.ResponseWriter, r *http.Request) {
        if err != nil {
            fmt.Println(err)
        }
+       if lastid == 0 {
+           lastid = id
+       } else if lastid > id {
+           lastid = id
+       }

        activityJson["actor"] = json.RawMessage(actorRaw)
        activitiesJson = append(activitiesJson, activityJson)
    }

-   data, err := json.Marshal(activitiesJson)
+   requestData := make(map[string]int)
+   requestData["lastid"] = lastid
+   requestData["total_results"] = 9999
+
+   totalJson := make(map[string]interface{} )
+   totalJson["requestdata"] = requestData
+   totalJson["activities"] = activitiesJson
+
+   data, err := json.Marshal(totalJson)
    if err != nil {
        log.Fatal("error marshaling combined activity: %v", err)
    }
@@ -69,7 +103,6 @@
}

func main() {
-   getSettings()
    pool = getDbPool()

    http.HandleFunc("/search", search)
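Once the stack is up, the new s (search term) and o (row offset) parameters can be exercised through the published restapi port; a sketch, assuming the service answers on the 127.0.0.1:6431 mapping from docker-compose.yml and using a placeholder search term:

# latest activities, no filter
curl 'http://127.0.0.1:6431/search'

# full-text search, then the next page of 10 rows via the offset parameter
curl 'http://127.0.0.1:6431/search?s=example'
curl 'http://127.0.0.1:6431/search?s=example&o=10'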