import openai
import json
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# Translates a Markdown file line by line through the OpenAI chat API,
# issuing requests concurrently with a thread pool.
class MarkdownTranslator:
    def __init__(self, config_file):
        self.config = self.load_config(config_file)
        # Module-level client configuration (openai Python SDK v1.x).
        openai.api_key = self.config.get('OPENAI_API_KEY')
        openai.base_url = self.config.get('OPENAI_API_BASE')
        openai.default_headers = {"x-foo": "true"}
    def load_config(self, config_file):
        try:
            with open(config_file, 'r', encoding='utf-8') as file:
                config = json.load(file)
            return config
        except Exception as e:
            logging.error(f"Error reading config file {config_file}: {e}")
            raise
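    # A minimal example of the expected config.json. The two key names come from
    # the lookups in __init__; the values below are placeholders, not real settings:
    #
    #     {
    #         "OPENAI_API_KEY": "sk-...",
    #         "OPENAI_API_BASE": "https://api.openai.com/v1"
    #     }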
    def read_markdown(self, file_path):
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                return file.read()
        except Exception as e:
            logging.error(f"Error reading file {file_path}: {e}")
            raise

    def write_markdown(self, file_path, content):
        try:
            with open(file_path, 'w', encoding='utf-8') as file:
                file.write(content)
        except Exception as e:
            logging.error(f"Error writing file {file_path}: {e}")
            raise
    def translate_text(self, text, source_lang='en', target_lang='zh'):
        try:
            response = openai.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {
                        "role": "user",
                        "content": f"Please translate the following {source_lang} text into {target_lang}:\n{text}"
                    }
                ]
            )
            return response.choices[0].message.content.strip()
        except Exception as e:
            # On failure, fall back to the untranslated text so the output file stays complete.
            logging.error(f"Error translating text: {e}")
            return text
    def process_markdown_content(self, content, source_lang, target_lang):
        lines = content.split('\n')
        translated_lines = []

        def translate_line(index, line):
            # Keep blank lines as-is; translate everything else.
            if line.strip():
                translated_line = self.translate_text(line, source_lang, target_lang)
                translated_lines.append((index, translated_line))
            else:
                translated_lines.append((index, ''))

        # Translate lines concurrently; appends to the shared list are safe under
        # CPython's GIL, and the (index, text) pairs are sorted afterwards to
        # restore the original line order.
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(translate_line, i, line) for i, line in enumerate(lines)]
            for future in as_completed(futures):
                future.result()

        translated_lines.sort(key=lambda x: x[0])
        return '\n'.join(line for _, line in translated_lines)
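    # A sketch of an equivalent, simpler variant (not in the original code):
    # executor.map returns results in submission order, so the index bookkeeping
    # and the final sort are unnecessary.
    #
    #     with ThreadPoolExecutor(max_workers=10) as executor:
    #         translated = list(executor.map(
    #             lambda line: self.translate_text(line, source_lang, target_lang)
    #             if line.strip() else '',
    #             lines,
    #         ))
    #     return '\n'.join(translated)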
    def translate_file(self, input_file, output_file, source_lang='en', target_lang='zh'):
        logging.info(f"Translating file from {source_lang} to {target_lang}...")
        # Confirm the configuration was loaded without writing the secret key to the log.
        logging.info(f"OpenAI key configured: {bool(openai.api_key)}")
        logging.info(f"OpenAI base URL: {openai.base_url}")

        markdown_content = self.read_markdown(input_file)
        translated_content = self.process_markdown_content(markdown_content, source_lang, target_lang)
        self.write_markdown(output_file, translated_content)
if __name__ == "__main__":
    input_file_path = 'input.md'
    output_file_path = 'output.md'

    source_language = 'en'
    target_language = 'zh'

    translator = MarkdownTranslator('config.json')
    translator.translate_file(input_file_path, output_file_path, source_language, target_language)
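# To run, save this script under any name (e.g. translate.py; the filename is an
# assumption, not given above), place config.json and input.md next to it, and invoke:
#
#     python translate.py
#
# The translated Markdown is written to output.md.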