setup fileserver
This commit is contained in:
parent
a706461634
commit
e8e0c89570
@ -18,5 +18,5 @@ stream_logger.setFormatter(formatter)
|
|||||||
log.addHandler(file_logger)
|
log.addHandler(file_logger)
|
||||||
log.addHandler(stream_logger)
|
log.addHandler(stream_logger)
|
||||||
|
|
||||||
log.info('log initialized')
|
log.info('log initialized') if not __name__ == 'sludge.src.lib.logger' else ...
|
||||||
|
|
||||||
|
@ -2,94 +2,7 @@ from .response import Response
|
|||||||
|
|
||||||
from typing import Callable, List
|
from typing import Callable, List
|
||||||
|
|
||||||
import re
|
|
||||||
import random
|
|
||||||
from bs4 import BeautifulSoup
|
|
||||||
|
|
||||||
type Patcher = Callable[[Response, 'Request'], Response]
|
type Patcher = Callable[[Response, 'Request'], Response]
|
||||||
|
|
||||||
def find_substring_in_lines(s, substring):
    """Return the index of the first line of *s* that contains *substring*.

    Falls back to 0 when no line matches — note this is indistinguishable
    from a hit on the first line; kept for compatibility with callers.
    """
    for idx, candidate in enumerate(s.splitlines()):
        if substring in candidate:
            return idx
    return 0
|
|
||||||
|
|
||||||
def extract_words_from_line(line):
    """Strip HTML tags from *line* and return its whitespace-separated words."""
    # drop anything that looks like a <tag>, then let str.split eat whitespace
    return re.sub(r'<[^>]+>', '', line).split()
|
|
||||||
|
|
||||||
def uwuify_text(text):
|
|
||||||
replacements = [
|
|
||||||
(r'r', 'w'),
|
|
||||||
(r'l', 'w'),
|
|
||||||
(r'R', 'W'),
|
|
||||||
(r'L', 'W'),
|
|
||||||
(r'no', 'nyo'),
|
|
||||||
(r'No', 'Nyo'),
|
|
||||||
(r'u', 'uwu'),
|
|
||||||
(r'U', 'Uwu')
|
|
||||||
]
|
|
||||||
|
|
||||||
for pattern, replacement in replacements:
|
|
||||||
text = re.sub(pattern, replacement, text)
|
|
||||||
|
|
||||||
expressions = [" owo", " UwU", " rawr", " >w<"]
|
|
||||||
sentences = text.split('. ')
|
|
||||||
uwuified_sentences = []
|
|
||||||
|
|
||||||
for sentence in sentences:
|
|
||||||
sentence = sentence.strip()
|
|
||||||
if sentence:
|
|
||||||
uwuified_sentences.append(sentence + (random.choice(expressions) if random.randint(0, 5) > 4 else ''))
|
|
||||||
|
|
||||||
return '. '.join(uwuified_sentences)
|
|
||||||
|
|
||||||
def uwuify(body):
    """Uwuify every visible text node of an HTML document.

    *body* is raw UTF-8 HTML bytes; returns the rewritten document as a str.
    Also appends ``uwu=true`` to every anchor href so followed links stay
    uwuified.
    """
    soup = BeautifulSoup(body.decode('utf-8'), 'html.parser')

    # rewrite text nodes, leaving script/style contents untouched
    for node in soup.find_all(text=True):
        if node.parent.name in ['script', 'style']:
            continue
        words = extract_words_from_line(node.string)
        node.replace_with(' '.join(uwuify_text(word) for word in words))

    # propagate the uwu flag through outgoing links
    for anchor in soup.find_all('a', href=True):
        href = anchor['href']
        joiner = '&' if '?' in href else '?'
        anchor['href'] = f"{href}{joiner}uwu=true"

    return str(soup)
|
|
||||||
|
|
||||||
def is_subdict(sub_dict, main_dict):
    """True iff every key/value pair of *sub_dict* is present in *main_dict*."""
    return all(
        key in main_dict and main_dict[key] == value
        for key, value in sub_dict.items()
    )
|
|
||||||
|
|
||||||
# Response post-processors applied in order to every outgoing response.
# Each Patcher takes (response, request) and returns a (possibly new) Response.
patchers: List[Patcher] = [
    # (disabled) patcher that replaced 'e' with 'a' in HTML bodies below </head>
    # lambda response, request: Response(
    #     response.code,
    #     response.headers,
    #     "\n".join(line.replace('e', 'a') if index > find_substring_in_lines(response.body.decode('utf-8'), '</head>') else line for index, line in enumerate(response.body.decode('utf-8').splitlines())).encode('utf-8')
    # ) if 'text/html' in response.headers.values() else response
    # uwuify HTML responses when the request carries the ?uwu=true query param
    lambda response, request: Response(
        response.code,
        response.headers,
        uwuify(response.body).encode('utf-8')
    ) if 'text/html' in response.headers.values() and is_subdict({'uwu': 'true'}, request.path.params) else response,
    # on HTML responses, randomly (~1 in 6 per occurrence) annotate the word
    # 'sludge' with its IPA pronunciation
    lambda response, request: Response(
        response.code,
        response.headers,
        re.sub(r'sludge', lambda match: 'sludge' + ' (/slʌd͡ʒ/)' if random.randint(0, 5) < 1 else 'sludge', response.body.decode('utf-8')).encode('utf-8')
    ) if 'text/html' in response.headers.values() else response
]
|
||||||
|
@ -4,7 +4,7 @@ from .responsecodes import ResponseCode
|
|||||||
from .logger import log
|
from .logger import log
|
||||||
|
|
||||||
class Response:
    """An HTTP response: status code, header mapping, and raw body bytes."""

    def __init__(self, code: 'ResponseCode', headers: 'Dict[str, str] | None' = None, body: bytes = b''):
        """Build a response.

        :param code: response status (a ResponseCode).
        :param headers: header name -> value mapping; defaults to a fresh
            empty dict per instance.
        :param body: raw response payload bytes.
        """
        self.code = code
        # BUG FIX: the previous default was `headers = dict()`, a mutable
        # default argument shared across every call — mutating one
        # default-constructed response's headers leaked into all others.
        # A None sentinel gives each instance its own dict.
        self.headers = {} if headers is None else headers
        self.body = body
|
||||||
|
@ -11,6 +11,8 @@ from .patchers import patchers
|
|||||||
from .logger import log
|
from .logger import log
|
||||||
import os
|
import os
|
||||||
import traceback
|
import traceback
|
||||||
|
import mimetypes
|
||||||
|
import hashlib
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class Route:
|
class Route:
|
||||||
@ -37,143 +39,94 @@ class Route:
|
|||||||
if not self.method_is_allowed(request.method): return False
|
if not self.method_is_allowed(request.method): return False
|
||||||
return self.matcher(request.path)
|
return self.matcher(request.path)
|
||||||
|
|
||||||
|
def generate_opengraph_html(file_url):
    """Build a small HTML page that embeds *file_url* with OpenGraph meta tags.

    The embed element is chosen from the URL's guessed MIME type:
    image -> <img>, video -> <video>, anything else -> <iframe>.
    Returns the page as a str.
    """
    print('FILE URL: ', file_url)
    mime_type, _ = mimetypes.guess_type(file_url)
    file_name = os.path.basename(file_url)

    if mime_type and mime_type.startswith('image/'):
        content_type = 'image'
        embed_html = f'<img src="{file_url}" alt="{file_name}" style="max-width: 100%; height: auto;">'
    elif mime_type and mime_type.startswith('video/'):
        content_type = 'video'
        embed_html = f'<video controls style="max-width: 100%;"><source src="{file_url}" type="{mime_type}">your browser does not support the video tag.</video>'
    else:
        content_type = 'document'
        embed_html = f'<iframe src="{file_url}" title="{file_name}" style="width: 100%; height: 600px; border: none;"></iframe>'

    # NOTE(review): og:image is a hard-coded placeholder ("URL_TO_AN_IMAGE"),
    # preserved as-is from the original — confirm whether it should be filled in.
    return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Embed File: {file_name}</title>

<!-- OpenGraph Meta Tags -->
<meta property="og:title" content="{file_name}" />
<meta property="og:type" content="{content_type}" />
<meta property="og:url" content="{file_url}" />
<meta property="og:description" content="Embedded file: {file_name}" />
<meta property="og:image" content="URL_TO_AN_IMAGE" /> <!-- Optional: Replace with an actual image URL -->

<style>
body {{
font-family: Arial, sans-serif;
margin: 20px;
}}
</style>
</head>
<body>
<p>{file_name}</p>
<hr>
{embed_html}
</body>
</html>
"""
|
||||||
|
|
||||||
|
def is_subdict(sub_dict, main_dict):
    """Check that *main_dict* contains every entry of *sub_dict* with equal values.

    NOTE(review): duplicated from the patchers module — consider sharing one
    definition.
    """
    missing = object()  # sentinel so a stored None still compares correctly
    for key, expected in sub_dict.items():
        if main_dict.get(key, missing) != expected:
            return False
    return True
|
||||||
|
|
||||||
|
def compute_md5(file_path):
    """Return the hex MD5 digest of the file at *file_path*.

    Reads in 4 KiB chunks so arbitrarily large files stay memory-cheap.
    """
    digest = hashlib.md5()
    with open(file_path, 'rb') as fh:
        while chunk := fh.read(4096):
            digest.update(chunk)
    return digest.hexdigest()
|
||||||
|
|
||||||
routes = [
|
routes = [
|
||||||
Route(
|
Route(
|
||||||
lambda request: request.path == '/style.css',
|
lambda request: request.path == '/',
|
||||||
[Method.GET],
|
|
||||||
lambda request, *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
*raw_file_contents('./style.css')
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda request: request.path == '/',
|
|
||||||
[Method.GET, Method.POST],
|
|
||||||
lambda request, *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
{'Content-Type': 'text/html'},
|
|
||||||
(parse_file('./home.html', dict(prev='\\/')).encode('utf-8') if request.method == Method.GET else (
|
|
||||||
[
|
|
||||||
(lambda form_data: (
|
|
||||||
(lambda time: (
|
|
||||||
f:=open(f'./files/posts-to-homepage/post_{time}.txt', 'w'),
|
|
||||||
f.write(f"<i style='font-family: MapleMonoItalic'>{form_data['name']}</i>@{time}<br>{form_data['text']}<br><br>"),
|
|
||||||
f.close()
|
|
||||||
))(datetime.now().strftime('%Y-%m-%d_%H:%M:%S-%f')[:-3]) if set(form_data.keys()) == set(['text', 'name']) else None
|
|
||||||
))(
|
|
||||||
reduce(
|
|
||||||
lambda acc, d: acc.update(d) or acc,
|
|
||||||
map(lambda key_value_pair: {key_value_pair[0]: remove_html_tags(key_value_pair[1])}, request.body.data.items()),
|
|
||||||
{}
|
|
||||||
)),
|
|
||||||
parse_file('./home.html').encode('utf-8')
|
|
||||||
][1]
|
|
||||||
))
|
|
||||||
) if len(request.body.data) > 0 or request.method != Method.POST else error_page(400)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda path: os.path.isdir('.' + path.path),
|
|
||||||
[Method.GET],
|
|
||||||
lambda request, *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
{'Content-Type': 'text/html'},
|
|
||||||
parse_file('./dir_index.html', dict(path='.' + request.path.path, prev=request.headers.get('Referer').replace('/', '\\/') if request.headers.has('Referer') else '')).encode('utf-8')
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda path: os.path.isfile('.' + path.path) and path.path.startswith('/html/') and (path.path.endswith('.html') or '/thoughts/' in path.path),
|
|
||||||
[Method.GET],
|
|
||||||
lambda request, *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
{'Content-Type': 'text/html'},
|
|
||||||
parse_file('.' + request.path.path, dict(prev=request.headers.get('Referer').replace('/', '\\/') if request.headers.has('Referer') else '')).encode('utf-8')
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda path: os.path.isfile('.' + path.path) and (path.path.startswith('/font/') or path.path.startswith('/files/')),
|
|
||||||
[Method.GET],
|
|
||||||
lambda request, *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
*raw_file_contents('.' + request.path.path)
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda request: request.path == '/status',
|
|
||||||
[Method.GET],
|
|
||||||
lambda *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
{'Content-Type': 'text/html'},
|
|
||||||
parse('<style>$[cat style.css]</style>$[neofetch | ansi2html]').encode('utf-8')
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda request: request.path == '/stats/is-its-computer-online',
|
|
||||||
[Method.GET],
|
[Method.GET],
|
||||||
lambda *_: Response(
|
lambda *_: Response(
|
||||||
ResponseCode.OK,
|
ResponseCode.OK,
|
||||||
{'Content-Type': 'text/html'},
|
{'Content-Type': 'text/html'},
|
||||||
page("online-p", """
|
parse_file('index.html').encode('utf-8')
|
||||||
seconds since last heartbeat message (less than 60: online; less than 120: maybe; more than 120: probably not): $[echo $(( $(date +%s) - $(stat -c %Y ./files/stats/heartbeat) ))]
|
|
||||||
""")
|
|
||||||
)
|
)
|
||||||
),
|
),
|
||||||
Route(
|
Route(
|
||||||
lambda request: request.path == '/stats/what-song-is-it-listening-to',
|
lambda request: [print(os.getcwd(), '.'+request.path, request.params, os.path.isfile('.'+request.path)), os.path.isfile('.'+request.path) and is_subdict({'embed': 'true'}, request.params)][-1],
|
||||||
[Method.GET],
|
[Method.GET],
|
||||||
lambda *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
{'Content-type': 'text/html'},
|
|
||||||
page("song?", """
|
|
||||||
it is listening to $[cat ./files/stats/song] as of $[echo $(( $(date +%s) - $(stat -c %Y ./files/stats/song) ))] seconds ago.
|
|
||||||
""")
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda request: request.path == '/stats/is-this-server-online',
|
|
||||||
[Method.GET],
|
|
||||||
lambda *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
{'Content-type': 'text/html'},
|
|
||||||
page("server online-p", """
|
|
||||||
I think so.
|
|
||||||
""")
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda request: request.path == '/stats/what-is-its-servers-uptime',
|
|
||||||
[Method.GET],
|
|
||||||
lambda *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
{'Content-type': 'text/html'},
|
|
||||||
page("uptime", """
|
|
||||||
$[uptime]
|
|
||||||
""")
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda request: request.path == '/stats/what-vim-buffers-does-it-have-open',
|
|
||||||
[Method.GET],
|
|
||||||
lambda *_: Response(
|
|
||||||
ResponseCode.OK,
|
|
||||||
{'Content-type': 'text/html'},
|
|
||||||
page("vim bufs", """
|
|
||||||
$[cat ./files/stats/vim-bufs | xargs -I% echo %'<br>']
|
|
||||||
""")
|
|
||||||
)
|
|
||||||
),
|
|
||||||
Route(
|
|
||||||
lambda request: request.path == '/stats',
|
|
||||||
[Method.GET],
|
|
||||||
lambda request, *_: Response(
|
lambda request, *_: Response(
|
||||||
ResponseCode.OK,
|
ResponseCode.OK,
|
||||||
{'Content-Type': 'text/html'},
|
{'Content-Type': 'text/html'},
|
||||||
parse_file('./html/stats.html', dict(prev=request.headers.get('Referer').replace('/', '\\/') if request.headers.has('Referer') else '')).encode('utf-8')
|
generate_opengraph_html(f'https://natalieee.net{request.path.path}?hash={request.path.params['hash']}').encode('utf-8')
|
||||||
)
|
)
|
||||||
),
|
),
|
||||||
Route(
|
Route(
|
||||||
lambda _: True,
|
lambda request: [print(os.getcwd(), '.'+request.path, request.params, os.path.isfile('.'+request.path)), os.path.isfile('.'+request.path)][-1],
|
||||||
[Method.GET],
|
[Method.GET],
|
||||||
lambda *_: error_page(404)
|
lambda request, *_: Response(
|
||||||
|
ResponseCode.OK,
|
||||||
|
*raw_file_contents('.'+request.path.path)
|
||||||
|
) if request.path.params['hash'] == compute_md5('.'+request.path.path) else error_page(403)
|
||||||
)
|
)
|
||||||
]
|
]
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user