From b817f3460775a04ccd4d197ef82405026e946c42 Mon Sep 17 00:00:00 2001 From: Space-Banane Date: Mon, 6 Apr 2026 23:03:21 +0200 Subject: [PATCH] refactor: standardize string formatting and improve code readability --- batch.py | 77 ++++++++++++++++++++++++++++-------------- with_ui.py | 99 +++++++++++++++++++++++++++++------------------------- 2 files changed, 106 insertions(+), 70 deletions(-) diff --git a/batch.py b/batch.py index 6da9ba6..7e93c5c 100644 --- a/batch.py +++ b/batch.py @@ -16,29 +16,37 @@ URLS = [ URLS_JSON = os.environ.get("URLS_JSON", str(Path(__file__).parent / "urls.json")) -RESET = "\033[0m" -BOLD = "\033[1m" -RED = "\033[91m" -GREEN = "\033[92m" +RESET = "\033[0m" +BOLD = "\033[1m" +RED = "\033[91m" +GREEN = "\033[92m" YELLOW = "\033[93m" -CYAN = "\033[96m" -DIM = "\033[2m" +CYAN = "\033[96m" +DIM = "\033[2m" + def log(msg, color=RESET): print(f"{color}{msg}{RESET}", flush=True) + def get_post_id(url): return url.rstrip("/").split("-")[-1] + def fetch_info(url): cmd = [ YTDLP, - "--dump-json", "--no-playlist", - "--cookies", COOKIES, - "--extractor-args", "generic:impersonate", - url + "--dump-json", + "--no-playlist", + "--cookies", + COOKIES, + "--extractor-args", + "generic:impersonate", + url, ] - result = subprocess.run(cmd, capture_output=True, text=True, timeout=30, cwd=Path(__file__).parent) + result = subprocess.run( + cmd, capture_output=True, text=True, timeout=30, cwd=Path(__file__).parent + ) if result.returncode != 0: return None, result.stderr.strip() try: @@ -46,31 +54,41 @@ def fetch_info(url): except json.JSONDecodeError: return None, "Failed to parse JSON" + def download(url, out_dir): cmd = [ YTDLP, - "-f", "bestvideo+bestaudio/best", + "-f", + "bestvideo+bestaudio/best", "--prefer-free-formats", - "--cookies", COOKIES, - "--extractor-args", "generic:impersonate", - "--merge-output-format", "mp4", - "-o", str(out_dir / "%(title)s.%(ext)s"), - url + "--cookies", + COOKIES, + "--extractor-args", + "generic:impersonate", + 
"--merge-output-format", + "mp4", + "-o", + str(out_dir / "%(title)s.%(ext)s"), + url, ] - result = subprocess.run(cmd, capture_output=True, text=True, cwd=Path(__file__).parent) + result = subprocess.run( + cmd, capture_output=True, text=True, cwd=Path(__file__).parent + ) return result.returncode == 0, (result.stdout + result.stderr).strip() + def sanitize(name, max_len=60): safe = "".join(c if c.isalnum() or c in " _-." else "_" for c in name) return safe.strip()[:max_len].strip("_. ") + def main(): OUTPUT_DIR.mkdir(parents=True, exist_ok=True) total = len(URLS) # Load from JSON if present and non-empty try: if Path(URLS_JSON).exists(): - with open(URLS_JSON, 'r', encoding='utf-8') as fh: + with open(URLS_JSON, "r", encoding="utf-8") as fh: data = json.load(fh) if isinstance(data, list) and data: URLS.clear() @@ -95,7 +113,9 @@ def main(): if info is None: # Likely a text post or unavailable - log(f" {YELLOW}⚠ skipped — no media ({err[:80] if err else 'no video found'}){RESET}") + log( + f" {YELLOW}⚠ skipped — no media ({err[:80] if err else 'no video found'}){RESET}" + ) skipped.append((url, err)) print() continue @@ -115,7 +135,9 @@ def main(): # Find what was downloaded files = list(out_dir.iterdir()) sizes = [f"{f.stat().st_size / 1e6:.1f} MB" for f in files if f.is_file()] - log(f" {GREEN}✓ done — {', '.join(sizes) if sizes else 'file saved'}{RESET}") + log( + f" {GREEN}✓ done — {', '.join(sizes) if sizes else 'file saved'}{RESET}" + ) ok.append(url) else: # Check if it's just no video (text post that slipped through info check) @@ -123,14 +145,18 @@ def main(): log(f" {YELLOW}⚠ skipped — text post{RESET}") skipped.append((url, "no video content")) # Remove empty dir - try: out_dir.rmdir() - except OSError: pass + try: + out_dir.rmdir() + except OSError: + pass else: log(f" {RED}✗ failed{RESET}") # Print last few lines of output for context for line in output.splitlines()[-3:]: log(f" {DIM}{line}{RESET}") - failed.append((url, output.splitlines()[-1] if 
output else "unknown error")) + failed.append( + (url, output.splitlines()[-1] if output else "unknown error") + ) print() @@ -150,5 +176,6 @@ def main(): log(f"{'━'*55}\n", CYAN) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/with_ui.py b/with_ui.py index edd5b64..53dc279 100644 --- a/with_ui.py +++ b/with_ui.py @@ -7,7 +7,7 @@ from flask import Flask, request, jsonify, Response, stream_with_context from flask_cors import CORS import time -app = Flask(__name__, static_folder='.', static_url_path='') +app = Flask(__name__, static_folder=".", static_url_path="") CORS(app) # Store active jobs: {job_id: {"lines": [], "done": bool, "error": bool}} @@ -29,7 +29,7 @@ def run_ytdlp(job_id, cmd): stderr=subprocess.STDOUT, text=True, bufsize=1, - cwd=DOWNLOAD_DIR + cwd=DOWNLOAD_DIR, ) for line in process.stdout: line = line.rstrip() @@ -47,73 +47,79 @@ def run_ytdlp(job_id, cmd): jobs[job_id]["error"] = True -@app.route('/') +@app.route("/") def index(): - return app.send_static_file('frontend.html') + return app.send_static_file("frontend.html") -@app.route('/api/info', methods=['POST']) +@app.route("/api/info", methods=["POST"]) def get_info(): data = request.json - url = data.get('url', '').strip() - cookies = data.get('cookies', '').strip() + url = data.get("url", "").strip() + cookies = data.get("cookies", "").strip() if not url: return jsonify({"error": "No URL provided"}), 400 - cmd = [YTDLP_PATH, '--dump-json', '--no-playlist'] + cmd = [YTDLP_PATH, "--dump-json", "--no-playlist"] if cookies: - cmd += ['--cookies', cookies] - cmd += ['--extractor-args', 'generic:impersonate', url] + cmd += ["--cookies", cookies] + cmd += ["--extractor-args", "generic:impersonate", url] try: - result = subprocess.run(cmd, capture_output=True, text=True, timeout=30, cwd=DOWNLOAD_DIR) + result = subprocess.run( + cmd, capture_output=True, text=True, timeout=30, cwd=DOWNLOAD_DIR + ) if result.returncode != 0: return jsonify({"error": result.stderr 
or "Failed to fetch info"}), 400 info = json.loads(result.stdout) formats = [] - for f in info.get('formats', []): - formats.append({ - "id": f.get('format_id'), - "ext": f.get('ext'), - "resolution": f.get('resolution') or f.get('format_note') or '', - "vcodec": f.get('vcodec', 'none'), - "acodec": f.get('acodec', 'none'), - "filesize": f.get('filesize') or f.get('filesize_approx'), - "tbr": f.get('tbr'), - "label": f.get('format'), - }) - return jsonify({ - "title": info.get('title', 'Unknown'), - "thumbnail": info.get('thumbnail'), - "duration": info.get('duration'), - "uploader": info.get('uploader'), - "formats": formats - }) + for f in info.get("formats", []): + formats.append( + { + "id": f.get("format_id"), + "ext": f.get("ext"), + "resolution": f.get("resolution") or f.get("format_note") or "", + "vcodec": f.get("vcodec", "none"), + "acodec": f.get("acodec", "none"), + "filesize": f.get("filesize") or f.get("filesize_approx"), + "tbr": f.get("tbr"), + "label": f.get("format"), + } + ) + return jsonify( + { + "title": info.get("title", "Unknown"), + "thumbnail": info.get("thumbnail"), + "duration": info.get("duration"), + "uploader": info.get("uploader"), + "formats": formats, + } + ) except subprocess.TimeoutExpired: return jsonify({"error": "Timed out fetching video info"}), 408 except Exception as e: return jsonify({"error": str(e)}), 500 -@app.route('/api/download', methods=['POST']) +@app.route("/api/download", methods=["POST"]) def start_download(): data = request.json - url = data.get('url', '').strip() - format_id = data.get('format_id', '').strip() - cookies = data.get('cookies', '').strip() - extra_args = data.get('extra_args', '').strip() + url = data.get("url", "").strip() + format_id = data.get("format_id", "").strip() + cookies = data.get("cookies", "").strip() + extra_args = data.get("extra_args", "").strip() if not url: return jsonify({"error": "No URL provided"}), 400 cmd = [YTDLP_PATH] if format_id: - cmd += ['-f', format_id] - cmd += 
['--prefer-free-formats'] +        cmd += ["-f", format_id] +        cmd += ["--prefer-free-formats"] if cookies: -        cmd += ['--cookies', cookies] -    cmd += ['--extractor-args', 'generic:impersonate'] +        cmd += ["--cookies", cookies] +    cmd += ["--extractor-args", "generic:impersonate"] if extra_args: cmd += extra_args.split() cmd.append(url) @@ -125,7 +131,7 @@ def start_download(): return jsonify({"job_id": job_id}) -@app.route('/api/status/<job_id>') +@app.route("/api/status/<job_id>") def job_status(job_id): def generate(): sent = 0 @@ -135,20 +141,23 @@ def job_status(job_id): if not job: yield f"data: {json.dumps({'error': 'Job not found'})}\n\n" break -        lines = job['lines'] +        lines = job["lines"] while sent < len(lines): yield f"data: {json.dumps({'line': lines[sent]})}\n\n" sent += 1 -        if job['done']: +        if job["done"]: yield f"data: {json.dumps({'done': True, 'error': job['error']})}\n\n" break time.sleep(0.2) -    return Response(stream_with_context(generate()), mimetype='text/event-stream', -                    headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no'}) +    return Response( +        stream_with_context(generate()), +        mimetype="text/event-stream", +        headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}, +    ) -if __name__ == '__main__': +if __name__ == "__main__": print(f"[yt-dlp UI] Serving on http://localhost:5000") print(f"[yt-dlp UI] Download directory: {DOWNLOAD_DIR}") -    app.run(debug=False, host='0.0.0.0', port=5000, threaded=True) \ No newline at end of file +    app.run(debug=False, host="0.0.0.0", port=5000, threaded=True)