From f48efed3b1d4644b57152cf92c1972bf32e2e515 Mon Sep 17 00:00:00 2001
From: Musab Erdem
Date: Wed, 13 Nov 2024 11:13:52 +0100
Subject: [PATCH] main.py added
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 main.py | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 108 insertions(+)
 create mode 100644 main.py

diff --git a/main.py b/main.py
new file mode 100644
index 0000000..46782a6
--- /dev/null
+++ b/main.py
@@ -0,0 +1,108 @@
+from llama_cpp import Llama
+
+llm_small = Llama.from_pretrained(  # loaded but currently unused; only llm_big is called below
+    repo_id="bartowski/Llama-3.2-1B-Instruct-GGUF",
+    filename="Llama-3.2-1B-Instruct-IQ3_M.gguf",
+)
+
+llm_big = Llama.from_pretrained(
+    repo_id="bartowski/Llama-3.2-3B-Instruct-GGUF",
+    filename="Llama-3.2-3B-Instruct-IQ3_M.gguf",
+)
+
+while True:
+    content = input("\n########################################\n\nWhat should the AI do?\n > ")
+
+    converter_system_prompt = (
+        "You are a digital AI prompt formulator. "
+        "The user will tell you what they want. "
+        "You then formulate the appropriate prompt for a coder AI, which solves this task with the help of code. "
+        "You will not do the task, you only turn it into a prompt! "
+        "You will not answer any question! "
+        "Do not write the code yourself! "
+        "Do not say what you are going to do! "
+        "Your output should be a direct command for programming. "
+        "Say explicitly that Python code is to be created."
+        # "For example, if the user asks you for the time, you should create a prompt that says that code should be written that outputs the current time."
+    )
+
+    converter_response = llm_big.create_chat_completion(  # step 1: rewrite the user request as a coding task
+        messages=[
+            {
+                "role": "system",
+                "content": converter_system_prompt
+            },
+            {
+                "role": "user",
+                "content": content
+            }
+        ]
+    )
+
+    converter_output = converter_response["choices"][0]["message"]["content"]
+
+
+    coder_response = llm_big.create_chat_completion(  # step 2: generate the Python one-liner
+        messages=[
+            {
+                "role": "system",
+                "content": (
+                    "Write only Python code. "
+                    "Do not use any 'def'. "
+                    "Write a one-liner. "
+                    "Do not add any comments. "
+                    "Do not add any other information or explanations to the code. "
+                    "Your code should be executable without errors."
+                )
+            },
+            {
+                "role": "user",
+                "content": converter_output
+            }
+        ]
+    )
+
+    coder_output = coder_response["choices"][0]["message"]["content"]
+
+    final_response = llm_big.create_chat_completion(  # step 3: clean up the generated code
+        messages=[
+            {
+                "role": "system",
+                "content": (
+                    "Remove any unnecessary characters from the Python code. "
+                    "The function of the code must not change. "
+                    "Remove all comments and other explanations. "
+                    "The code should stand alone. "
+                    "Look over the code again and revise it if necessary so that it is immediately executable."
+                )
+            },
+            {
+                "role": "user",
+                "content": coder_output
+            }
+        ]
+    )
+
+    output = final_response["choices"][0]["message"]["content"]
+
+    output = output.replace("`", "").replace("python", "")  # strip the markdown code fence markers
+    output = output.strip()
+
+    print("\n####################\nUser Input\n####################\n")
+    print(content)
+
+    print("\n####################\nConverter Output\n####################\n")
+    print(converter_output)
+
+    print("\n####################\nCoder Output\n####################\n")
+    print(coder_output)
+
+    print("\n####################\nFinal Code\n####################\n")
+    print(output)
+
+    answer = input("\nShould the code be executed? (yes/no) ")
+    if answer.lower() == "yes":
+        print("\nOutput:\n")
+        exec(output)  # runs the generated code directly in this process
+    else:
+        print("\nThe code was not executed.")
\ No newline at end of file