💾 Archived View for gmn.clttr.info › sources › geminispace.git › tree › gus › build_index.py.txt captured on 2022-06-03 at 23:42:01.
-=-=-=-=-=-=-
import argparse
import logging
from datetime import datetime, timedelta
from urllib.parse import uses_relative, uses_netloc

from . import constants
from gus.crawl import should_skip
from gus.excludes import EXCLUDED_URL_PREFIXES
from gus.lib.db_model import init_db, Page
from gus.lib.gemini import GeminiResource
from gus.lib.index_statistics import (
    compute_index_statistics,
    persist_statistics,
    log_index_statistics,
)
import gus.lib.logging
from gus.lib.logging import strip_control_chars
import gus.lib.search as search

# hack: the built-in methods in urllib need to know the
# Gemini protocol exists
uses_relative.append("gemini")
uses_netloc.append("gemini")


def index_page(index, page):
    if should_skip(GeminiResource(page.url)):
        logging.debug(
            "URL is excluded, skipping: %s",
            strip_control_chars(page.url),
        )
        return False

    logging.info("Indexing page: %s", strip_control_chars(page.url))

    # count cross-host backlinks pointing at this page
    u = page.url.rstrip("/")
    external_backlinks = Page.raw(
        """SELECT p_from.url
        FROM page AS p_from
        JOIN link as l ON l.from_page_id == p_from.id
        JOIN page as p_to ON p_to.id == l.to_page_id
        WHERE p_to.url == ?
        AND l.is_cross_host_like == 1""",
        u,
    )
    logging.debug("Calculating backlinks for %s", u)
    backlink_urls = [b.url for b in external_backlinks.execute()]
    backlink_count = len(backlink_urls)

    document = {
        "url_id": page.url,
        "url": page.url,
        "fetchable_url": page.fetchable_url,
        "domain": page.domain,
        "port": page.port,
        "content_type": page.content_type,
        "charset": page.charset or "none",
        "lang": page.lang,
        "size": page.size,
        "indexed_at": datetime.utcnow(),
        "backlink_count": backlink_count,
        "prompt": page.prompt,
        "content": page.content,
    }
    try:
        logging.debug("Adding document to index: %s", page.url)
        index.add_document(document)
        logging.debug("Document done")
        return True
    except Exception as e:
        logging.exception(
            "Failed to index page: %s: %s",
            strip_control_chars(page.url),
            e,
        )
        return False


def build_index(should_run_destructive=False):
    index_dir = constants.INDEX_DIR_NEW if should_run_destructive else constants.INDEX_DIR
    db = init_db(f"{index_dir}/{constants.DB_FILENAME}")
    index = search.Index(index_dir, should_run_destructive)

    # delete pages that were never crawled successfully
    count = 0
    q = Page.select().where(
        Page.last_crawl_success_at.is_null(True) & Page.last_crawl_at.is_null(False)
    )
    for page in q.iterator():
        try:
            index.delete_by_term("url_id", page.url)
            page.delete_instance()
            count += 1
        except Exception as e:
            logging.error(
                "Failed to delete row %s without successful crawl: %s", page.id, e
            )
    logging.warning("Deleted %d rows without successful crawl", count)

    # delete pages whose last successful crawl is older than 30 days and which have
    # been recrawled since then; this avoids deleting files whose change_frequency
    # is longer than our timeout
    # q = Page.select().where((Page.last_crawl_at > Page.last_crawl_success_at) & (Page.last_crawl_success_at < (datetime.now() + timedelta(days=-30))))
    # try:
    #     domains = q.execute()
    #     for del_domain in domains:
    #         logging.warning("Deleting pages for domain: %s", del_domain.domain)
    #         # Page.delete().where(Page.domain = domain)
    # except Exception as e:
    #     logging.error("Failed to delete domains with outdated successful crawl: %s", e)

    # delete pages whose last successful crawl is older than 30 days and which have
    # been recrawled since then; this avoids deleting files whose change_frequency
    # is longer than our timeout
    count = 0
    q = Page.select().where(
        (Page.last_crawl_at > Page.last_crawl_success_at)
        & (Page.last_crawl_success_at < (datetime.now() + timedelta(days=-30)))
    )
    for page in q.iterator():
        try:
            index.delete_by_term("url_id", page.url)
            page.delete_instance()
            count += 1
        except Exception as e:
            logging.error(
                "Failed to delete row %s with outdated successful crawl: %s", page.id, e
            )
    logging.warning("Deleted %d rows with outdated successful crawl", count)

    # destructive runs re-index every successfully crawled page; incremental runs
    # only pages crawled successfully since they were last indexed
    if should_run_destructive:
        pages = Page.raw(
            """SELECT p.* FROM page AS p
            WHERE p.last_success_status == 20
            AND (p.content_type NOT LIKE 'text/%'
            OR (p.content_type LIKE 'text/%' AND p.size <= ?))""",
            constants.MAXIMUM_TEXT_PAGE_SIZE,
        )
    else:
        pages = Page.raw(
            """SELECT p.* FROM page AS p
            WHERE p.last_success_status == 20
            AND (p.indexed_at IS NULL OR p.indexed_at < p.last_crawl_success_at)
            AND (p.content_type NOT LIKE 'text/%'
            OR (p.content_type LIKE 'text/%' AND p.size <= ?))""",
            constants.MAXIMUM_TEXT_PAGE_SIZE,
        )

    for page in pages.iterator():
        index_page(index, page)
        page.indexed_at = datetime.utcnow()
        page.save()

    try:
        logging.info("Committing search index...")
        index.close()
        logging.info("Updating raw data...")
    except Exception as e:
        logging.error("Closing of index failed: %s", e)

    logging.debug("Updating statistics...")
    index_statistics = compute_index_statistics(db)
    log_index_statistics(index_statistics)
    persist_statistics(index_statistics, None, should_run_destructive, "statistics.csv")

    logging.info("Finished!")


def main():
    args = parse_args()
    gus.lib.logging.handle_arguments(args)
    build_index(args.should_run_destructive)


def parse_args():
    parser = argparse.ArgumentParser(description="Crawl Geminispace.")
    parser.add_argument(
        "--destructive",
        "-d",
        dest="should_run_destructive",
        action="store_true",
        default=False,
        help="create a fresh index",
    )
    gus.lib.logging.add_arguments(parser)
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    main()
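
For context, a minimal sketch of driving the indexer without the argparse entry point, e.g. from a scheduler. It assumes the package above is importable as gus.build_index and that its constants point at an existing crawl database and index directory; the weekly_reindex() wrapper is purely illustrative and not part of the source.

from gus.build_index import build_index


def weekly_reindex(destructive=False):
    # Incremental run (default): only pages whose last successful crawl is newer
    # than their indexed_at timestamp are re-indexed, mirroring the
    # non-destructive SQL branch in build_index above.
    # Destructive run: builds a fresh index under constants.INDEX_DIR_NEW,
    # the same as invoking the script with --destructive / -d.
    build_index(should_run_destructive=destructive)


if __name__ == "__main__":
    weekly_reindex(destructive=False)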