import streamlit as st
import requests
import os
import datetime
import base64
from pathlib import Path
# Configuration
ST_BACKEND_URL = os.getenv("BACKEND_URL", "http://localhost:5001")
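# The Flask backend does the actual text analysis and image generation;
# override with the BACKEND_URL environment variable when it is not on localhost:5001.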
st.set_page_config(page_title="Novel to Manga", layout="wide", page_icon="🎨")
# Constants
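# Text (prompt-writing) models offered per provider; "Custom" lets the user type any model ID.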
MODEL_OPTIONS = {
"OpenAI": ["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"],
"SiliconFlow": ["deepseek-ai/DeepSeek-V2.5", "Qwen/Qwen2.5-72B-Instruct", "meta-llama/Meta-Llama-3.1-405B-Instruct"],
"AIHubMix": ["gpt-4o", "claude-3-5-sonnet-20240620", "gemini-1.5-pro", "gemini-2.0-flash-exp"],
"DeepSeek": ["deepseek-chat", "deepseek-coder"],
"Custom": []
}
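# Image-generation models offered per provider; "Custom" again falls back to a free-text model field.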
IMAGE_MODEL_OPTIONS = {
"OpenAI": ["dall-e-3", "dall-e-2"],
"AIHubMix": ["gpt-image-1", "g-nano-banana-pro", "imagen-4.0-generate-001", "flux-pro", "midjourney"],
"SiliconFlow": ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev", "stabilityai/stable-diffusion-3-5-large"],
"DeepSeek": ["deepseek-chat"],
"Custom": []
}
# Load decorative images
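# Each file is read and base64-encoded so it can be inlined as a data URI in the HTML/CSS injected below.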
images_dir = Path(__file__).parent.parent / "images"
# Load character image for top-right corner
char_path = images_dir / "垎俥ĺžç_20260108172035_101_32.jpg"
char_encoded = ""
if char_path.exists():
    with open(char_path, "rb") as f:
        char_encoded = base64.b64encode(f.read()).decode()
# Load decorative image for bottom-left corner
bottom_left_path = images_dir / "background.png"
bottom_left_encoded = ""
if bottom_left_path.exists():
    with open(bottom_left_path, "rb") as f:
        bottom_left_encoded = base64.b64encode(f.read()).decode()
# Load decorative image for sidebar accent
sidebar_accent_path = images_dir / "Pasted image (2).png"
sidebar_accent_encoded = ""
if sidebar_accent_path.exists():
    with open(sidebar_accent_path, "rb") as f:
        sidebar_accent_encoded = base64.b64encode(f.read()).decode()
# Custom CSS with Light Transparent Background
st.markdown(f"""
""", unsafe_allow_html=True)
# Add top-right character image - No whitespace
if char_encoded:
st.markdown(f"""
""", unsafe_allow_html=True)
st.title("Novel to Manga Converter")
st.markdown("Transform your stories into visual manga panels with AI.")
# Sidebar for Settings
with st.sidebar:
    st.header("⚙️ Settings")
    # Provider Selection
    provider = st.selectbox(
        "API Provider",
        ["OpenAI", "SiliconFlow", "AIHubMix", "DeepSeek", "Custom"],
        help="Select your API provider to auto-fill Base URL"
    )
    # Defaults based on provider
    default_base_url = "https://api.openai.com/v1"
    default_model = "gpt-4o"
    if provider == "SiliconFlow":
        default_base_url = "https://api.siliconflow.cn/v1"
        default_model = "deepseek-ai/DeepSeek-V2.5"
    elif provider == "AIHubMix":
        default_base_url = "https://api.aihubmix.com/v1"
        default_model = "gpt-4o"
    elif provider == "DeepSeek":
        default_base_url = "https://api.deepseek.com"
        default_model = "deepseek-chat"
    api_key_openai = st.text_input(f"{provider} API Key", type="password", help="Required for generating prompts")
    base_url = st.text_input("Base URL", value=default_base_url)
    # Model Selection (Text)
    if provider == "Custom":
        model_name = st.text_input("Model Name", value=default_model)
    else:
        available_models = MODEL_OPTIONS.get(provider, [default_model])
        model_name = st.selectbox("Model Name", available_models, index=available_models.index(default_model) if default_model in available_models else 0)
    st.divider()
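    # Image generation can use a different provider and model; when no separate image
    # key is entered, the text provider's key is reused (see the fallback in the
    # generate-image payload below).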
st.subheader("Image Generation")
image_provider = st.selectbox(
"Image Provider",
["OpenAI", "AIHubMix", "SiliconFlow", "DeepSeek", "Custom"],
index=0,
help="Select provider for Image Generation"
)
# Image Gen Defaults
default_img_base = "https://api.openai.com/v1"
default_img_model = "dall-e-3"
if image_provider == "AIHubMix":
default_img_base = "https://api.aihubmix.com/v1"
default_img_model = "gpt-image-1"
elif image_provider == "SiliconFlow":
default_img_base = "https://api.siliconflow.cn/v1"
default_img_model = "black-forest-labs/FLUX.1-schnell"
api_key_image = st.text_input("Image Gen API Key (Optional)", type="password", help="Leave blank to use above key")
image_base_url = st.text_input("Image Base URL", value=default_img_base)
# Image Model Selection
if image_provider == "Custom":
image_model_name = st.text_input("Image Model", value=default_img_model, help="e.g. dall-e-3, gpt-image-1")
else:
available_img_models = IMAGE_MODEL_OPTIONS.get(image_provider, [default_img_model])
# Try to find default index, else 0
idx = 0
if default_img_model in available_img_models:
idx = available_img_models.index(default_img_model)
image_model_name = st.selectbox("Image Model", available_img_models, index=idx)
    st.divider()
    st.info(f"Backend expected at: `{ST_BACKEND_URL}`")
    st.markdown("Make sure the backend is running before you generate.")
# Main Area
tab1, tab2 = st.tabs(["Create", "History"])
with tab1:
st.subheader("1. Input Novel Text")
novel_text = st.text_area("Paste your novel text here... (Paragraphs separated by double newlines)", height=200, placeholder="The hero stood on the cliff edge...")
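    # Step 1: send the text to the backend, which returns a draft manga prompt for each paragraph.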
if st.button("đ Analyze & Generate Prompts", type="primary"):
if not novel_text:
st.error("Please enter some text.")
elif not api_key_openai and not os.getenv("OPENAI_API_KEY"):
st.warning("Please provide an OpenAI API Key.")
else:
            with st.spinner("Analyzing text and generating prompts..."):
                try:
                    payload = {
                        "text": novel_text,
                        "api_key": api_key_openai,
                        "base_url": base_url if base_url else None,
                        "model": model_name
                    }
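                    # Ask the Flask backend to split the text and draft the prompts;
                    # the result is cached in session_state for the review step below.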
                    response = requests.post(f"{ST_BACKEND_URL}/process_text", json=payload)
                    if response.status_code == 200:
                        prompts = response.json().get("prompts", [])
                        st.session_state['prompts'] = prompts
                        st.session_state['novel_text'] = novel_text
                        st.success(f"Generated {len(prompts)} prompts!")
                    else:
                        st.error(f"Backend Error: {response.text}")
                except requests.exceptions.ConnectionError:
                    st.error(f"Cannot connect to backend at {ST_BACKEND_URL}. Is the Flask app running?")
                except Exception as e:
                    st.error(f"Error: {e}")
    # Display Prompts and Image Gen
    if 'prompts' in st.session_state and st.session_state['prompts']:
        st.divider()
        st.subheader("2. Review Prompts & Generate Images")
        # Grid layout for panels
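        # Each panel keeps its own session_state keys (prompt_{i}, image_{i}, image_data_{i})
        # so edited prompts and generated images survive Streamlit reruns.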
        for i, item in enumerate(st.session_state['prompts']):
            with st.container():
                st.markdown(f"### Panel {i+1}")
                col1, col2 = st.columns([1, 1])
                with col1:
                    st.caption("Original Text")
                    st.info(item['paragraph'])
                with col2:
                    st.caption("Manga Prompt (Editable)")
                    prompt_key = f"prompt_{i}"
                    # Initialize default prompt if not edited
                    if prompt_key not in st.session_state:
                        st.session_state[prompt_key] = item['prompt']
                    prompt_text = st.text_area("Prompt", key=prompt_key, label_visibility="collapsed", height=100)
if st.button(f"đ¨ Generate Image for Panel {i+1}", key=f"btn_gen_{i}"):
with st.spinner("Drawing..."):
try:
# Use the edited prompt
payload = {
"prompt": prompt_text,
"api_key": api_key_image or api_key_openai, # Fallback
"base_url": image_base_url if image_base_url else None,
"model": image_model_name,
"novel_text": item['paragraph'] # Save context
}
res = requests.post(f"{ST_BACKEND_URL}/generate_image", json=payload)
if res.status_code == 200:
img_url = res.json().get("image_url")
st.session_state[f"image_{i}"] = img_url
# Fetch bytes for download
try:
img_data = requests.get(img_url).content
st.session_state[f"image_data_{i}"] = img_data
except:
pass
else:
st.error(f"Backend Error: {res.text}")
except Exception as e:
st.error(f"Error: {e}")
if f"image_{i}" in st.session_state:
st.image(st.session_state[f"image_{i}"], use_container_width=True)
if f"image_data_{i}" in st.session_state:
st.download_button(
label="âŹď¸ Download Panel",
data=st.session_state[f"image_data_{i}"],
file_name=f"panel_{i+1}.png",
mime="image/png",
key=f"dl_{i}"
)
with tab2:
st.header("History")
if st.button("đ Refresh History"):
try:
res = requests.get(f"{ST_BACKEND_URL}/history")
if res.status_code == 200:
history = res.json().get("history", [])
st.session_state['history'] = history
if not history:
st.info("No history found.")
else:
st.error("Failed to fetch history")
except requests.exceptions.ConnectionError:
st.error(f"Cannot connect to backend.")
    if 'history' in st.session_state:
        for item in st.session_state['history']:
            ts = item.get('timestamp')
            date_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') if ts else "Unknown Date"
            with st.expander(f"Generations from {date_str} - {(item.get('prompt') or '')[:30]}..."):
                col_h1, col_h2 = st.columns([1, 2])
                with col_h1:
                    st.image(item.get('image_url'), caption="Generated Image")
                with col_h2:
                    st.markdown("**Prompt:**")
                    st.code(item.get('prompt'))
                    if item.get('novel_text'):
                        st.markdown("**Original Text:**")
                        st.write(item.get('novel_text'))