440e097d78
This commit is in a pretty rough state. It refactors the crawler fairly significantly to offer better separation of concerns. It entirely replaces the zstd-compressed JSON files used to store crawl data with WARC files, and the converter is modified to consume this data. This works, -ish. There appears to be a bug relating to reading robots.txt, and the X-Robots-Tag header is no longer processed either. Another problem is that the WARC files are a bit too large. It will probably be necessary to introduce a new format for storing the crawl data long term, something like Parquet, and use WARCs for intermediate storage so that the crawler can be restarted without needing a recrawl.
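Since the build file below pulls in libs.jwarc, here is a minimal sketch of how the converter side could iterate WARC records and recover the X-Robots-Tag header mentioned above. The file name, class name, and handling logic are illustrative assumptions, not taken from the commit:

    import org.netpreserve.jwarc.WarcReader;
    import org.netpreserve.jwarc.WarcRecord;
    import org.netpreserve.jwarc.WarcResponse;

    import java.io.IOException;
    import java.nio.file.Path;

    class WarcHeaderSketch {
        public static void main(String[] args) throws IOException {
            // "crawl.warc.gz" is a placeholder; the commit does not name its output files.
            try (WarcReader reader = new WarcReader(Path.of("crawl.warc.gz"))) {
                for (WarcRecord record : reader) {
                    // Response records hold the captured HTTP exchange.
                    if (record instanceof WarcResponse response) {
                        // The header the commit notes is currently being dropped:
                        response.http().headers().first("X-Robots-Tag")
                                .ifPresent(tag -> System.out.println(response.target() + " -> " + tag));
                    }
                }
            }
        }
    }

One upside of this layout is that the stored HTTP headers come back verbatim from the WARC record, so header-dependent logic like X-Robots-Tag handling can be fixed in the converter without touching the crawler.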
42 lines · 1.1 KiB · Groovy
plugins {
    id 'java'

    id 'jvm-test-suite'
}

java {
    toolchain {
        languageVersion.set(JavaLanguageVersion.of(21))
    }
}

dependencies {
    implementation project(':code:common:model')
    implementation project(':code:common:db')
    implementation project(':code:common:process')
    implementation project(':code:libraries:big-string')
    implementation project(':code:api:index-api')
    implementation project(':code:common:service-discovery')
    implementation project(':code:common:service-client')
    implementation project(':code:features-crawl:content-type')
    implementation project(':code:libraries:language-processing')

    implementation libs.bundles.slf4j

    implementation libs.notnull

    implementation libs.jwarc
    implementation libs.gson
    implementation libs.commons.io
    implementation libs.okhttp3
    implementation libs.jsoup
    implementation libs.snakeyaml
    implementation libs.zstd

    testImplementation libs.bundles.slf4j.test
    testImplementation libs.bundles.junit
    testImplementation libs.mockito
}