From e8e0c89570de9f6891642a6897a5f44a48286016 Mon Sep 17 00:00:00 2001
From: gnat
Date: Sat, 28 Sep 2024 01:12:39 -0700
Subject: [PATCH] setup fileserver

---
 src/lib/logger.py   |   2 +-
 src/lib/patchers.py |  87 --------------------
 src/lib/response.py |   2 +-
 src/lib/router.py   | 199 +++++++++++++++++---------------------------
 4 files changed, 78 insertions(+), 212 deletions(-)

diff --git a/src/lib/logger.py b/src/lib/logger.py
index 3b77d67..588ea7c 100644
--- a/src/lib/logger.py
+++ b/src/lib/logger.py
@@ -18,5 +18,5 @@ stream_logger.setFormatter(formatter)
 log.addHandler(file_logger)
 log.addHandler(stream_logger)
 
-log.info('log initialized')
+log.info('log initialized') if not __name__ == 'sludge.src.lib.logger' else ...

diff --git a/src/lib/patchers.py b/src/lib/patchers.py
index de5e1a9..7d3395d 100644
--- a/src/lib/patchers.py
+++ b/src/lib/patchers.py
@@ -2,94 +2,7 @@ from .response import Response
 from typing import Callable, List
-import re
-import random
-from bs4 import BeautifulSoup
 
 type Patcher = Callable[[Response, 'Request'], Response]
 
-def find_substring_in_lines(s, substring):
-    for line_index, line in enumerate(s.splitlines()):
-        position = line.find(substring)
-        if position != -1:
-            return line_index
-
-    return 0
-
-def extract_words_from_line(line):
-    clean_line = re.sub(r'<[^>]+>', '', line)
-    words = clean_line.split()
-    return words
-
-def uwuify_text(text):
-    replacements = [
-        (r'r', 'w'),
-        (r'l', 'w'),
-        (r'R', 'W'),
-        (r'L', 'W'),
-        (r'no', 'nyo'),
-        (r'No', 'Nyo'),
-        (r'u', 'uwu'),
-        (r'U', 'Uwu')
-    ]
-
-    for pattern, replacement in replacements:
-        text = re.sub(pattern, replacement, text)
-
-    expressions = [" owo", " UwU", " rawr", " >w<"]
-    sentences = text.split('. ')
-    uwuified_sentences = []
-
-    for sentence in sentences:
-        sentence = sentence.strip()
-        if sentence:
-            uwuified_sentences.append(sentence + (random.choice(expressions) if random.randint(0, 5) > 4 else ''))
-
-    return '. '.join(uwuified_sentences)
-
-def uwuify(body):
-    body = body.decode('utf-8')
-    soup = BeautifulSoup(body, 'html.parser')
-
-    for text in soup.find_all(text=True):
-        if text.parent.name not in ['script', 'style']:
-            original_text = text.string
-            words = extract_words_from_line(original_text)
-            uwuified_words = [uwuify_text(word) for word in words]
-            uwuified_text = ' '.join(uwuified_words)
-            text.replace_with(uwuified_text)
-
-    for a_tag in soup.find_all('a', href=True):
-        original_href = a_tag['href']
-        if '?' in original_href:
-            new_href = f"{original_href}&uwu=true"
-        else:
-            new_href = f"{original_href}?uwu=true"
-        a_tag['href'] = new_href
-
-    return str(soup)
-
-def is_subdict(sub_dict, main_dict):
-    for key, value in sub_dict.items():
-        if key not in main_dict or main_dict[key] != value:
-            return False
-    return True
-
 patchers: List[Patcher] = [
-    # lambda response, request: Response(
-    #     response.code,
-    #     response.headers,
-    #     "\n".join(line.replace('e', 'a') if index > find_substring_in_lines(response.body.decode('utf-8'), '') else line for index, line in enumerate(response.body.decode('utf-8').splitlines())).encode('utf-8')
-    # ) if 'text/html' in response.headers.values() else response
-    lambda response, request: Response(
-        response.code,
-        response.headers,
-        uwuify(response.body).encode('utf-8')
-    ) if 'text/html' in response.headers.values() and is_subdict({'uwu': 'true'}, request.path.params) else response,
-    lambda response, request: Response(
-        response.code,
-        response.headers,
-        re.sub(r'sludge', lambda match: 'sludge' + ' (/slʌd͡ʒ/)' if random.randint(0, 5) < 1 else 'sludge', response.body.decode('utf-8')).encode('utf-8')
-    ) if 'text/html' in response.headers.values() else response
 ]

diff --git a/src/lib/response.py b/src/lib/response.py
index bbd271b..e363b76 100644
--- a/src/lib/response.py
+++ b/src/lib/response.py
@@ -4,7 +4,7 @@ from .responsecodes import ResponseCode
 from .logger import log
 
 class Response:
-    def __init__(self, code: ResponseCode, headers: Dict[str, str], body: bytes):
+    def __init__(self, code: ResponseCode, headers: Dict[str, str] = dict(), body: bytes = b''):
         self.code = code
         self.headers = headers
         self.body = body

diff --git a/src/lib/router.py b/src/lib/router.py
index 5130299..11b2101 100644
--- a/src/lib/router.py
+++ b/src/lib/router.py
@@ -11,6 +11,8 @@ from .patchers import patchers
 from .logger import log
 import os
 import traceback
+import mimetypes
+import hashlib
 
 @dataclass
 class Route:
@@ -37,143 +39,94 @@ class Route:
         if not self.method_is_allowed(request.method):
             return False
         return self.matcher(request.path)
 
+def generate_opengraph_html(file_url):
+    print('FILE URL: ', file_url)
+    # strip the query string (e.g. ?hash=...) so guess_type sees the file extension
+    mime_type, _ = mimetypes.guess_type(file_url.split('?')[0])
+
+    file_name = os.path.basename(file_url)
+
+    if mime_type and mime_type.startswith('image/'):
+        content_type = 'image'
+        embed_html = f'<img src="{file_url}" alt="{file_name}">'
+    elif mime_type and mime_type.startswith('video/'):
+        content_type = 'video'
+        embed_html = f'<video controls src="{file_url}"></video>'
+    else:
+        content_type = 'document'
+        embed_html = f'<iframe src="{file_url}"></iframe>'
+
+    html_content = f"""<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="utf-8">
+    <title>Embed File: {file_name}</title>
+    <meta property="og:title" content="{file_name}">
+    <meta property="og:type" content="{content_type}">
+    <meta property="og:url" content="{file_url}">
+    <meta property="og:{'video' if content_type == 'video' else 'image'}" content="{file_url}">
+</head>
+<body>
+    <h1>{file_name}</h1>
+    <hr>
+    {embed_html}
+</body>
+</html>
+"""
+    return html_content
+
+def is_subdict(sub_dict, main_dict):
+    for key, value in sub_dict.items():
+        if key not in main_dict or main_dict[key] != value:
+            return False
+    return True
+
+def compute_md5(file_path):
+    md5_hash = hashlib.md5()
+
+    with open(file_path, 'rb') as file:
+        for chunk in iter(lambda: file.read(4096), b""):
+            md5_hash.update(chunk)
+
+    return md5_hash.hexdigest()
+
 routes = [
     Route(
-        lambda request: request.path == '/style.css',
-        [Method.GET],
-        lambda request, *_: Response(
-            ResponseCode.OK,
-            *raw_file_contents('./style.css')
-        )
-    ),
-    Route(
-        lambda request: request.path == '/',
-        [Method.GET, Method.POST],
-        lambda request, *_: Response(
-            ResponseCode.OK,
-            {'Content-Type': 'text/html'},
-            (parse_file('./home.html', dict(prev='\\/')).encode('utf-8') if request.method == Method.GET else (
-                [
-                    (lambda form_data: (
-                        (lambda time: (
-                            f:=open(f'./files/posts-to-homepage/post_{time}.txt', 'w'),
-                            f.write(f"{form_data['name']}@{time}<br>{form_data['text']}<hr>"),
-                            f.close()
-                        ))(datetime.now().strftime('%Y-%m-%d_%H:%M:%S-%f')[:-3]) if set(form_data.keys()) == set(['text', 'name']) else None
-                    ))(
-                        reduce(
-                            lambda acc, d: acc.update(d) or acc,
-                            map(lambda key_value_pair: {key_value_pair[0]: remove_html_tags(key_value_pair[1])}, request.body.data.items()),
-                            {}
-                        )),
-                    parse_file('./home.html').encode('utf-8')
-                ][1]
-            ))
-        ) if len(request.body.data) > 0 or request.method != Method.POST else error_page(400)
-    ),
-    Route(
-        lambda path: os.path.isdir('.' + path.path),
-        [Method.GET],
-        lambda request, *_: Response(
-            ResponseCode.OK,
-            {'Content-Type': 'text/html'},
-            parse_file('./dir_index.html', dict(path='.' + request.path.path, prev=request.headers.get('Referer').replace('/', '\\/') if request.headers.has('Referer') else '')).encode('utf-8')
-        )
-    ),
-    Route(
-        lambda path: os.path.isfile('.' + path.path) and path.path.startswith('/html/') and (path.path.endswith('.html') or '/thoughts/' in path.path),
-        [Method.GET],
-        lambda request, *_: Response(
-            ResponseCode.OK,
-            {'Content-Type': 'text/html'},
-            parse_file('.' + request.path.path, dict(prev=request.headers.get('Referer').replace('/', '\\/') if request.headers.has('Referer') else '')).encode('utf-8')
-        )
-    ),
-    Route(
-        lambda path: os.path.isfile('.' + path.path) and (path.path.startswith('/font/') or path.path.startswith('/files/')),
-        [Method.GET],
-        lambda request, *_: Response(
-            ResponseCode.OK,
-            *raw_file_contents('.' + request.path.path)
-        )
-    ),
-    Route(
-        lambda request: request.path == '/status',
-        [Method.GET],
-        lambda *_: Response(
-            ResponseCode.OK,
-            {'Content-Type': 'text/html'},
-            parse('$[neofetch | ansi2html]').encode('utf-8')
-        )
-    ),
-    Route(
-        lambda request: request.path == '/stats/is-its-computer-online',
+        lambda request: request.path == '/',
         [Method.GET],
         lambda *_: Response(
             ResponseCode.OK,
             {'Content-Type': 'text/html'},
-            page("online-p", """
-            seconds since last heartbeat message (less than 60: online; less than 120: maybe; more than 120: probably not): $[echo $(( $(date +%s) - $(stat -c %Y ./files/stats/heartbeat) ))]
-            """)
+            parse_file('index.html').encode('utf-8')
         )
     ),
     Route(
-        lambda request: request.path == '/stats/what-song-is-it-listening-to',
-        [Method.GET],
-        lambda *_: Response(
-            ResponseCode.OK,
-            {'Content-type': 'text/html'},
-            page("song?", """
-            it is listening to $[cat ./files/stats/song] as of $[echo $(( $(date +%s) - $(stat -c %Y ./files/stats/song) ))] seconds ago.
-            """)
-        )
-    ),
-    Route(
-        lambda request: request.path == '/stats/is-this-server-online',
-        [Method.GET],
-        lambda *_: Response(
-            ResponseCode.OK,
-            {'Content-type': 'text/html'},
-            page("server online-p", """
-            I think so.
-            """)
-        )
-    ),
-    Route(
-        lambda request: request.path == '/stats/what-is-its-servers-uptime',
-        [Method.GET],
-        lambda *_: Response(
-            ResponseCode.OK,
-            {'Content-type': 'text/html'},
-            page("uptime", """
-            $[uptime]
-            """)
-        )
-    ),
-    Route(
-        lambda request: request.path == '/stats/what-vim-buffers-does-it-have-open',
-        [Method.GET],
-        lambda *_: Response(
-            ResponseCode.OK,
-            {'Content-type': 'text/html'},
-            page("vim bufs", """
-            $[cat ./files/stats/vim-bufs | xargs -I% echo %'<br>']
-            """)
-        )
-    ),
-    Route(
-        lambda request: request.path == '/stats',
-        [Method.GET],
+        lambda request: [print(os.getcwd(), '.'+request.path, request.params, os.path.isfile('.'+request.path)), os.path.isfile('.'+request.path) and is_subdict({'embed': 'true'}, request.params)][-1],
+        [Method.GET],
         lambda request, *_: Response(
             ResponseCode.OK,
             {'Content-Type': 'text/html'},
-            parse_file('./html/stats.html', dict(prev=request.headers.get('Referer').replace('/', '\\/') if request.headers.has('Referer') else '')).encode('utf-8')
-        )
+            generate_opengraph_html(f'https://natalieee.net{request.path.path}?hash={request.path.params.get("hash")}').encode('utf-8')
+        )
     ),
     Route(
-        lambda _: True,
-        [Method.GET],
-        lambda *_: error_page(404)
+        lambda request: [print(os.getcwd(), '.'+request.path, request.params, os.path.isfile('.'+request.path)), os.path.isfile('.'+request.path)][-1],
+        [Method.GET],
+        lambda request, *_: Response(
+            ResponseCode.OK,
+            *raw_file_contents('.'+request.path.path)
+        ) if request.path.params.get('hash') == compute_md5('.'+request.path.path) else error_page(403)
     )
 ]
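
Note on usage: with this patch a file is only served when the request carries a
?hash= parameter equal to the md5 of the file on disk, so share links have to
be minted out-of-band. A minimal sketch of what minting could look like
(make_share_link is a hypothetical helper, not part of this patch; the base
URL is the one hard-coded in the embed route above):

    import hashlib

    def compute_md5(file_path):
        # same chunked md5 as compute_md5 in src/lib/router.py
        md5_hash = hashlib.md5()
        with open(file_path, 'rb') as file:
            for chunk in iter(lambda: file.read(4096), b""):
                md5_hash.update(chunk)
        return md5_hash.hexdigest()

    def make_share_link(file_path):
        # file_path is relative to the server root, e.g. './files/cat.png' (illustrative)
        return f"https://natalieee.net{file_path.lstrip('.')}?hash={compute_md5(file_path)}"

    print(make_share_link('./files/cat.png'))
    # -> https://natalieee.net/files/cat.png?hash=<md5 hex digest>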
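Note on embeds: the embed route serves a small OpenGraph wrapper page instead
of the raw file when embed=true is present, so link unfurlers (Discord and
friends) can read og: meta tags rather than raw bytes. Roughly, assuming a
link minted as above (path and hash illustrative):

    GET /files/cat.png?hash=<md5>              -> raw bytes, or 403 on a hash mismatch
    GET /files/cat.png?hash=<md5>&embed=true   -> generate_opengraph_html(...) wrapper page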