(minor) Clean up dead endpoints

Viktor Lofgren 2023-08-29 17:04:54 +02:00
parent dd593c292c
commit 3f288e264b
9 changed files with 21 additions and 53 deletions

View File

@@ -3,8 +3,6 @@ package nu.marginalia.index.client;
 
 public class IndexMqEndpoints {
     public static final String INDEX_IS_BLOCKED = "INDEX-IS-BLOCKED";
     public static final String INDEX_REPARTITION = "INDEX-REPARTITION";
-    public static final String INDEX_RELOAD_LEXICON = "INDEX-RELOAD-LEXICON";
-    public static final String INDEX_REINDEX = "INDEX-REINDEX";
     public static final String SWITCH_INDEX = "SWITCH-INDEX";
 }
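These constants are bare endpoint names: the message-queue layer matches them against annotated handler methods, as the IndexService hunk further down shows. A minimal sketch of the sending side, assuming an MqOutbox wired to the index service (the surrounding class and method are hypothetical):

```java
import nu.marginalia.index.client.IndexMqEndpoints;
import nu.marginalia.mq.outbox.MqOutbox;

class SwitchIndexExample {
    // Hypothetical caller; an outbox like this is normally injected.
    void triggerSwitch(MqOutbox indexOutbox) throws Exception {
        // Fire-and-forget: the index service's @MqNotification handler
        // for SWITCH-INDEX picks this up by endpoint name.
        indexOutbox.sendNotice(IndexMqEndpoints.SWITCH_INDEX, "");
    }
}
```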

View File

@@ -19,6 +19,11 @@ described in [converting-model](../process-models/converting-model/).
 The [loading-process](loading-process/) reads the processed data and creates an index journal
 and lexicon, and loads domains and addresses into the MariaDB-database.
 
+## 4. Index Construction Process
+
+The [index-construction-process](index-constructor-process/) constructs indices from
+the data generated by the loader.
+
 ## Overview
 
 Schematically the crawling and loading process looks like this:
@@ -65,8 +70,12 @@ Schematically the crawling and loading process looks like this:
     \\==================//
               |
       +------------+
-      | LOADING    |  Insert URLs in DB
+      | LOADING    |  Insert URLs in link DB
       | STEP       |  Insert keywords in Index
       +------------+
+              |
+      +------------+
+      | CONSTRUCT  |  Make the data searchable
+      | INDEX      |
+      +------------+
 ```
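The boxes above correspond to the stages that ConvertAndLoadActor drives later in this commit. As a rough mnemonic of the progression (stage names here are illustrative, not the actor's actual state constants):

```java
// Illustrative summary of the pipeline stages in the diagram above;
// the real state machine lives in ConvertAndLoadActor.
enum PipelineStage {
    CRAWL,     // fetch raw documents from the web
    CONVERT,   // extract keywords and metadata
    LOAD,      // insert URLs in the link DB, keywords in the index journal
    CONSTRUCT, // build the searchable index from the journal
    SWITCH     // notify search and index services to cut over
}
```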

View File

@@ -185,7 +185,6 @@ public class ControlService extends Service {
         Spark.post("/public/actions/calculate-adjacencies", controlActionsService::calculateAdjacencies, redirectToActors);
         Spark.post("/public/actions/reload-blogs-list", controlActionsService::reloadBlogsList, redirectToActors);
         Spark.post("/public/actions/repartition-index", controlActionsService::triggerRepartition, redirectToActors);
-        Spark.post("/public/actions/reconstruct-index", controlActionsService::triggerIndexReconstruction, redirectToActors);
         Spark.post("/public/actions/trigger-data-exports", controlActionsService::triggerDataExports, redirectToActors);
         Spark.post("/public/actions/flush-api-caches", controlActionsService::flushApiCaches, redirectToActors);
         Spark.post("/public/actions/truncate-links-database", controlActionsService::truncateLinkDatabase, redirectToActors);

View File

@@ -10,7 +10,6 @@ import nu.marginalia.actor.ActorStateFactory;
 import nu.marginalia.control.process.ProcessOutboxes;
 import nu.marginalia.control.process.ProcessService;
 import nu.marginalia.control.svc.BackupService;
-import nu.marginalia.db.storage.model.FileStorage;
 import nu.marginalia.index.client.IndexClient;
 import nu.marginalia.index.client.IndexMqEndpoints;
 import nu.marginalia.mqapi.converting.ConvertAction;
@@ -29,15 +28,11 @@ import nu.marginalia.actor.state.ActorState;
 import nu.marginalia.actor.state.ActorResumeBehavior;
 import nu.marginalia.search.client.SearchClient;
 import nu.marginalia.search.client.SearchMqEndpoints;
-import org.apache.commons.io.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import com.github.luben.zstd.ZstdOutputStream;
 import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.StandardCopyOption;
 import java.sql.SQLException;
 import java.time.LocalDateTime;
 
 @Singleton
 public class ConvertAndLoadActor extends AbstractActorPrototype {
@@ -298,7 +293,7 @@ public class ConvertAndLoadActor extends AbstractActorPrototype {
     public void switchOver(Long id) throws Exception {
         // Notify services to switch over
         searchOutbox.sendNotice(SearchMqEndpoints.SWITCH_LINKDB, ":-)");
-        indexOutbox.sendNotice(IndexMqEndpoints.INDEX_REINDEX, ":^D");
+        indexOutbox.sendNotice(IndexMqEndpoints.SWITCH_INDEX, ":^D");
     }
 }
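sendNotice is the one-way half of the outbox API; the smiley payloads are effectively ignored, and switchOver returns without waiting for either service. Contrast with sendAsync, which the deleted triggerIndexReconstruction further down used to enqueue a message. A hedged sketch of the two shapes as inferred from their call sites in this commit (exact signatures may differ):

```java
// Sketch only: method shapes inferred from how they are used in this diff.
void messagingStyles(MqOutbox indexOutbox) throws Exception {
    // One-way notification: no reply is awaited.
    indexOutbox.sendNotice(IndexMqEndpoints.SWITCH_INDEX, ":^D");

    // Asynchronous request: enqueues and returns an id the caller may wait on.
    long msgId = indexOutbox.sendAsync(IndexMqEndpoints.INDEX_REPARTITION, "");
}
```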

View File

@@ -4,14 +4,12 @@ import com.google.inject.Inject;
 import com.google.inject.Singleton;
 import nu.marginalia.control.actor.ControlActors;
 import nu.marginalia.control.actor.Actor;
-import nu.marginalia.db.DbDomainQueries;
 import nu.marginalia.db.DomainTypes;
 import nu.marginalia.index.client.IndexClient;
 import nu.marginalia.index.client.IndexMqEndpoints;
 import nu.marginalia.mq.MessageQueueFactory;
 import nu.marginalia.mq.outbox.MqOutbox;
 import nu.marginalia.search.client.SearchClient;
-import nu.marginalia.search.client.SearchMqEndpoints;
 import nu.marginalia.service.control.ServiceEventLog;
 import nu.marginalia.service.id.ServiceId;
 import spark.Request;
@@ -24,7 +22,6 @@ import java.util.UUID;
 public class ControlActionsService {
 
     private final ControlActors actors;
     private final SearchClient searchClient;
-    private final IndexClient indexClient;
     private final MqOutbox apiOutbox;
     private final ServiceEventLog eventLog;
@@ -32,14 +29,12 @@
 
     @Inject
     public ControlActionsService(ControlActors actors,
                                  SearchClient searchClient,
-                                 IndexClient indexClient,
                                  MessageQueueFactory mqFactory,
                                  ServiceEventLog eventLog,
                                  DomainTypes domainTypes) {
 
         this.actors = actors;
         this.searchClient = searchClient;
-        this.indexClient = indexClient;
         this.apiOutbox = createApiOutbox(mqFactory);
         this.eventLog = eventLog;
@@ -107,10 +102,4 @@
         return null;
     }
 
-    public Object triggerIndexReconstruction(Request request, Response response) throws Exception {
-        indexClient.outbox().sendAsync(IndexMqEndpoints.INDEX_REINDEX, "");
-        return null;
-    }
-
 }

View File

@@ -45,16 +45,6 @@
                 </form>
             </td>
         </tr>
-        <tr>
-            <td><b>Reconstruct Index</b><p>
-                This will reconstruct the index from the index journal.
-            </td>
-            <td>
-                <form method="post" action="/actions/reconstruct-index" onsubmit="return confirm('Confirm reconstruction')">
-                    <input type="submit" value="Trigger Reconstruction">
-                </form>
-            </td>
-        </tr>
         <tr>
             <td><b>Flush <tt>api-service</tt> Caches</b><p>
                 This will instruct the api-service to flush its caches,
@@ -62,7 +52,7 @@
                 changes to the API licenses directly through the database.
             </td>
             <td>
-                <form method="post" action="/actions/flush-index-caches" onsubmit="return confirm('Confirm flushing api chaches')">
+                <form method="post" action="/actions/flush-api-caches" onsubmit="return confirm('Confirm flushing api chaches')">
                     <input type="submit" value="Flush API">
                 </form>
             </td>

View File

@@ -20,7 +20,6 @@ import spark.Request;
 import spark.Response;
 import spark.Spark;
 
 import java.io.IOException;
-import java.util.concurrent.TimeUnit;
 
 import static spark.Spark.get;
@@ -75,12 +74,6 @@ public class IndexService extends Service {
 
     volatile boolean initialized = false;
 
-    @MqRequest(endpoint = IndexMqEndpoints.INDEX_RELOAD_LEXICON)
-    public String reloadLexicon(String message) throws Exception {
-        throw new UnsupportedOperationException();
-    }
-
     @MqRequest(endpoint = IndexMqEndpoints.INDEX_REPARTITION)
     public String repartition(String message) {
         if (!opsService.repartition()) {
@@ -89,9 +82,9 @@ public class IndexService extends Service {
         return "ok";
     }
 
-    @MqNotification(endpoint = IndexMqEndpoints.INDEX_REINDEX)
-    public String reindex(String message) throws Exception {
-        if (!opsService.reindex()) {
+    @MqNotification(endpoint = IndexMqEndpoints.SWITCH_INDEX)
+    public String switchIndex(String message) throws Exception {
+        if (!opsService.switchIndex()) {
             throw new IllegalStateException("Ops lock busy");
         }
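Note the split between the two annotations in this hunk: @MqRequest endpoints answer the caller with their return value, while @MqNotification endpoints are one-way, matching the sendNotice call in ConvertAndLoadActor above. A minimal sketch of the two handler shapes (the dispatch machinery that discovers them by endpoint name is assumed, not shown):

```java
// Hedged sketch of the handler shapes suggested by this diff.
class HandlerShapes {
    @MqRequest(endpoint = IndexMqEndpoints.INDEX_REPARTITION)
    public String repartition(String message) {
        return "ok";  // returned to the requester as the reply
    }

    @MqNotification(endpoint = IndexMqEndpoints.SWITCH_INDEX)
    public String switchIndex(String message) {
        return "ok";  // no reply expected; informational only
    }
}
```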

View File

@@ -23,14 +23,11 @@ public class IndexServicesFactory {
     private final Logger logger = LoggerFactory.getLogger(getClass());
     private final Path searchSetsBase;
 
-    final int LIVE_PART = 0;
-    final int NEXT_PART = 1;
-
     @Inject
     public IndexServicesFactory(
             ServiceHeartbeat heartbeat,
             FileStorageService fileStorageService
-            ) throws IOException, SQLException {
+            ) throws SQLException {
         liveStorage = fileStorageService.getStorageByType(FileStorageType.INDEX_LIVE).asPath();
         searchSetsBase = fileStorageService.getStorageByType(FileStorageType.SEARCH_SETS).asPath();
View File

@@ -33,10 +33,8 @@ public class IndexOpsService {
     public boolean repartition() {
         return run(searchSetService::recalculateAll);
     }
 
-    public boolean reindex() throws Exception {
-        return run(() -> {
-            return index.switchIndex();
-        }).isPresent();
-    }
+    public boolean switchIndex() throws Exception {
+        return run(index::switchIndex).isPresent();
+    }
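The run(...) helper evidently funnels every operation through an ops lock and reports a busy lock as an empty result, which is why switchIndex tests isPresent() and IndexService turns a false return into "Ops lock busy". A minimal sketch of such a guard, with the class name and the Callable-based signature assumed rather than taken from the repository:

```java
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative only: admits one operation at a time, signalling a busy
// lock with Optional.empty() rather than blocking or throwing.
class OpsLockSketch {
    private final ReentrantLock opsLock = new ReentrantLock();

    public <T> Optional<T> run(Callable<T> operation) throws Exception {
        if (!opsLock.tryLock())
            return Optional.empty();  // another ops task is already running
        try {
            return Optional.ofNullable(operation.call());
        } finally {
            opsLock.unlock();
        }
    }
}
```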