:- module(llm, [llm/2]).

/** <module> Simple LLM client

This module exposes the predicate llm/2, which posts a user prompt to
an HTTP-based large language model (LLM) API and unifies the model's
response with the second argument.

Configuration is provided through environment variables, so the
library can be reused across different APIs.

The library assumes an OpenAI-compatible payload/response. To target
a different API, adjust llm_request_body/2 or llm_extract_text/2.
*/

:- use_module(library(error)).
:- use_module(library(apply)).
:- use_module(library(http/http_client)).
:- use_module(library(http/http_json)).
:- use_module(library(http/http_ssl_plugin)).
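
% A minimal usage sketch. The URL, model, and key below are
% illustrative placeholders, not values shipped with this module; any
% OpenAI-compatible chat-completions endpoint should work:
%
%   ?- setenv('LLM_API_URL', 'https://api.openai.com/v1/chat/completions'),
%      setenv('LLM_MODEL', 'gpt-4o-mini'),
%      setenv('LLM_API_KEY', 'sk-your-key-here'),
%      llm("Reply with exactly: pong", Reply).
%   Reply = "pong".
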
%!  llm(?Input, ?Output) is semidet.
%
%   Send Input as a user prompt and unify Output with the model's
%   reply. If Input is unbound, run in reverse: ask the model for a
%   prompt that should elicit the (bound) Output text, bind Input to
%   that prompt, and verify the constrained round trip.
llm(Input, Output) :-
    (   var(Input)
    ->  ensure_prompt(Output, Target),
        generate_prompt(Target, Prompt),
        Input = Prompt,
        constrained_prompt(Target, Prompt, PromptWithConstraint),
        llm_prompt_text(PromptWithConstraint, Text),
        unify_text(Text, Output)
    ;   llm_prompt_text(Input, Text),
        unify_text(Text, Output)
    ).

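% Reverse-mode sketch (an illustrative transcript, not a recorded
% run): with Input unbound, llm/2 asks the model for a prompt that
% elicits Output, then replays that prompt under a strict
% answer-only constraint, so success depends on the model obeying it.
%
%   ?- llm(Prompt, "42").
%   Prompt = "What is six times seven? Reply with digits only.".
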
%!  llm_prompt_text(+Input, -Text) is det.
%
%   Pipeline: normalise Input to a string, build the request body,
%   POST it, and extract the reply text.
llm_prompt_text(Input, Text) :-
    ensure_prompt(Input, Prompt),
    llm_request_body(Prompt, Body),
    llm_post_json(Body, Response),
    llm_extract_text(Response, Text).

%!  ensure_prompt(+Input, -Prompt:string) is det.
%
%   Coerce a string, atom, or code list to a string; reject anything
%   else with a type error.
ensure_prompt(Input, Prompt) :-
    (   string(Input)
    ->  Prompt = Input
    ;   atom(Input)
    ->  atom_string(Input, Prompt)
    ;   is_list(Input)
    ->  string_codes(Prompt, Input)
    ;   throw(error(type_error(text, Input), _))
    ).

%!  llm_request_body(+Prompt, -Body:dict) is det.
%
%   Build an OpenAI-style chat-completions payload.
llm_request_body(Prompt, _{model:Model, messages:[_{role:'user', content:Prompt}]}) :-
    llm_model(Model).

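% For reference, the dict above serialises to JSON like the
% following (the model name shown is the built-in fallback):
%
%   {"model": "gpt-4o-mini",
%    "messages": [{"role": "user", "content": "Reply with exactly: pong"}]}
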
%!  llm_model(-Model:string) is det.
%
%   Model name from LLM_MODEL, defaulting to "gpt-4o-mini".
llm_model(Model) :-
    (   getenv('LLM_MODEL', Raw), Raw \= ''
    ->  ensure_string(Raw, Model)
    ;   Model = "gpt-4o-mini"
    ).

%!  llm_post_json(+Body:dict, -Response:dict) is det.
%
%   POST Body as JSON. The status_code(Status) option keeps
%   http_post/4 from turning 4xx/5xx replies into exceptions so we
%   can report them ourselves; json_object(dict) parses the reply
%   into a dict.
llm_post_json(Body, Response) :-
    llm_endpoint(URL),
    llm_auth_header(Header),
    llm_timeout(Timeout),
    Options = [
        request_header('Authorization'=Header),
        accept(json),
        timeout(Timeout),
        json_object(dict),
        status_code(Status)
    ],
    catch(
        http_post(URL, json(Body), Data, Options),
        Error,
        throw(error(llm_request_failed(Error), _))
    ),
    handle_status(Status, Data, Response).

%!  handle_status(+Status, +Data, -Response) is det.
%
%   Succeed on 2xx; otherwise raise llm_http_error/2.
handle_status(Status, Data, Response) :-
    between(200, 299, Status),
    !,
    Response = Data.
handle_status(Status, Data, _) :-
    throw(error(llm_http_error(Status, Data), _)).

%!  unify_text(+Text, ?Output) is semidet.
%
%   Bind Output to Text, or check that a bound Output matches it.
unify_text(Text, Output) :-
    (   var(Output)
    ->  Output = Text
    ;   ensure_prompt(Output, Expected),
        Expected = Text
    ).

%!  generate_prompt(+Target, -Prompt:string) is det.
%
%   Ask the model to suggest a prompt that elicits Target verbatim.
generate_prompt(Target, Prompt) :-
    format(string(Request),
           "Provide a single user prompt that would make you reply with the exact text \"~w\". Return only the prompt.",
           [Target]),
    llm_prompt_text(Request, Suggestion),
    ensure_prompt(Suggestion, Prompt).

%!  constrained_prompt(+Target, +Prompt, -FinalPrompt:string) is det.
%
%   Prefix Prompt with an instruction to answer only with Target.
constrained_prompt(Target, Prompt, FinalPrompt) :-
    format(string(FinalPrompt),
           "You must answer ONLY with the exact text \"~w\" (case sensitive, no punctuation or extra words). Now respond to the following prompt:\n\n~w",
           [Target, Prompt]).

%!  llm_endpoint(-URL) is det.
%
%   Endpoint from LLM_API_URL; mandatory.
llm_endpoint(URL) :-
    (   getenv('LLM_API_URL', URL), URL \= ''
    ->  true
    ;   throw(error(existence_error(environment_variable, 'LLM_API_URL'), _))
    ).

%!  llm_auth_header(-Header:string) is det.
%
%   "Bearer <key>" header built from LLM_API_KEY; mandatory.
llm_auth_header(Header) :-
    (   getenv('LLM_API_KEY', Key), Key \= ''
    ->  ensure_string(Key, KeyStr),
        format(string(Header), 'Bearer ~w', [KeyStr])
    ;   throw(error(existence_error(environment_variable, 'LLM_API_KEY'), _))
    ).

%!  llm_timeout(-Timeout:number) is det.
%
%   Timeout in seconds from LLM_API_TIMEOUT, defaulting to 60.
llm_timeout(Timeout) :-
    (   getenv('LLM_API_TIMEOUT', Raw), Raw \= '',
        catch(number_string(Timeout, Raw), _, fail)
    ->  true
    ;   Timeout = 60
    ).

%!  llm_extract_text(+Response:dict, -Output:string) is det.
%
%   Pull the reply text out of an OpenAI-style choices list, or fall
%   back to a top-level output/response field; otherwise raise a
%   domain error.
llm_extract_text(Response, Output) :-
    (   _{choices:[First|_]} :< Response
    ->  extract_choice_text(First, Output0)
    ;   (   get_dict(output, Response, Output0)
        ;   get_dict(response, Response, Output0)
        )
    ),
    ensure_string(Output0, Output),
    !.
llm_extract_text(Response, _) :-
    throw(error(domain_error(llm_response, Response), _)).

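% Response shapes accepted above, sketched as JSON. The first is the
% OpenAI chat-completions layout; the output/response fallbacks suit
% simpler proxy-style APIs:
%
%   {"choices": [{"message": {"role": "assistant", "content": "pong"}}]}
%   {"choices": [{"text": "pong"}]}
%   {"output": "pong"}
%   {"response": "pong"}
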
%!  extract_choice_text(+Choice:dict, -Text) is semidet.
%
%   Prefer message.content; fall back to a bare text field.
extract_choice_text(Choice, Text) :-
    (   get_dict(message, Choice, Message),
        get_dict(content, Message, Content)
    ->  normalize_content(Content, Text)
    ;   get_dict(text, Choice, Text)
    ).

%!  normalize_content(+Content, -Text:string) is det.
%
%   Content may be a plain string/atom or a list of typed segments;
%   flatten either form to one string.
normalize_content(Content, Text) :-
    (   string(Content)
    ->  Text = Content
    ;   is_list(Content)
    ->  maplist(segment_text, Content, Segments),
        atomics_to_string(Segments, Text)
    ;   atom(Content)
    ->  atom_string(Content, Text)
    ;   throw(error(type_error(llm_content, Content), _))
    ).

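% Segmented content, as some APIs return it, concatenates to one
% string; e.g. the (illustrative) reply fragment
%
%   {"content": [{"type": "text", "text": "Hello, "},
%                {"type": "text", "text": "world"}]}
%
% normalises to "Hello, world".
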
%!  segment_text(+Segment, -Text:string) is det.
%
%   Note: json_object(dict) reads JSON strings as SWI-Prolog strings,
%   so the type tag must be matched as "text", not the atom text.
segment_text(Dict, Text) :-
    (   is_dict(Dict),
        get_dict(type, Dict, "text"),
        get_dict(text, Dict, Text0)
    ->  ensure_string(Text0, Text)
    ;   ensure_string(Dict, Text)
    ).

%!  ensure_string(+Value, -Text:string) is det.
%
%   Coerce atoms and code lists to strings.
ensure_string(Value, Text) :-
    (   string(Value)
    ->  Text = Value
    ;   atom(Value)
    ->  atom_string(Value, Text)
    ;   is_list(Value)
    ->  string_codes(Text, Value)
    ;   throw(error(type_error(text, Value), _))
    ).