-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathnatural_language_cli.py
More file actions
168 lines (142 loc) · 6.79 KB
/
natural_language_cli.py
File metadata and controls
168 lines (142 loc) · 6.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
from flowchat import Chain, autodedent
import os
import subprocess
def execute_system_command(command: str) -> str:
    """Run *command* through the system shell and return its text output.

    Returns the captured stdout when the command exits with status 0,
    otherwise the captured stderr (preserving the original contract where
    a failing command yielded its stderr instead of raising).

    SECURITY NOTE: ``shell=True`` executes an arbitrary shell string. In
    this CLI the string originates from an LLM suggestion, i.e. untrusted
    input; this is inherently dangerous and is kept only because executing
    free-form one-liners is the program's explicit purpose.
    """
    result = subprocess.run(
        command, shell=True, capture_output=True, text=True
    )
    # Branch on the exit status directly instead of round-tripping through
    # check=True + CalledProcessError — same observable behavior, no
    # exception used for ordinary control flow.
    return result.stdout if result.returncode == 0 else result.stderr
def main() -> None:
    """Interactive REPL: turn natural-language requests into shell commands.

    Per loop iteration: (1) ask a small model whether the user wants to
    exit; (2) run a nested-chain feasibility check; (3) have the model
    suggest a one-line command; (4) execute it and print an LLM-written
    explanation of the output. Requires the third-party ``flowchat``
    package (presumably configured with an OpenAI API key via the
    environment — not visible here; confirm against flowchat docs).
    """
    print("Welcome to the Natural Language Command Line Interface!")
    # System prompt shared by every chain. os.name is coarse ('posix'/'nt'),
    # not a full platform description.
    os_system_context = f"You are a shell interpreter assistant running on {os.name} operating system."
    while True:
        user_input = input("Please enter your command in natural language: ")
        # ========================================================================== #
        # Cheap classifier pass: small model, 2 tokens max, YES/NO answer.
        should_exit = (
            Chain(model="gpt-3.5-turbo")
            .link(autodedent(
                "Does the user want to exit the CLI? Respond with 'YES' or 'NO'.",
                user_input
            )).pull(max_tokens=2).unhook().last()
        )
        if should_exit.lower() in ("yes", "y"):
            print("Exiting the CLI.")
            break
        # ========================================================================== #
        print("Checking if the command is possible to execute...")
        # Check if the user's request is possible; example of nested chains!
        # In practice, you could just ignore all of this and just execute the command.
        # Pipeline shape: derive a requirement list, then for each requirement
        # ask for a probe command, actually run it, and let the model judge the
        # output. `possible` ends up a list of booleans, one per requirement.
        possible = (
            Chain(model="gpt-4-turbo")
            .anchor(os_system_context)
            .link(autodedent(
                "The user would like to do this: ",
                user_input
            ))
            .link("Create a short list of the minimum requirements that need to be checked in order to determine if this action is possible on this device.")
            .pull(json_schema={"requirement_list": "List[str]"})
            .transform(lambda requirement_json: requirement_json["requirement_list"]).log()
            .transform(lambda requirement_list: [
                # One nested chain per requirement — sequential LLM calls, so
                # this section is slow for long requirement lists.
                Chain("gpt-4-turbo")
                .anchor(os_system_context)
                .link(autodedent(
                    "Suggest a command that can check if this requirement is met. The command should be a one-liner without user input or interaction.",
                    requirement,
                    "If the command needs additional information, you can include it. If the command itself can be run alone, leave additional_info an empty list."
                ))
                .pull(json_schema={"command": "string", "additional_info": "List[str]"})
                # Gather any auxiliary info the probe command needs; each info
                # item gets its own chain AND a real shell execution.
                .transform(lambda command_json: (command_json["command"], [
                    Chain("gpt-4-turbo")
                    .anchor(os_system_context)
                    .link(autodedent(
                        "The user would like to know this information: ",
                        info,
                        "Suggest a command that can check if this information is available."
                    ))
                    .pull(json_schema={"command": "string"})
                    .transform(lambda command_json: command_json["command"])
                    # NOTE(review): correctness relies on each chained transform
                    # running eagerly per comprehension iteration so `info` is
                    # bound per-item — confirm flowchat transforms are eager,
                    # otherwise the classic late-binding-closure pitfall applies.
                    .transform(lambda command: f"{info} | Output:{execute_system_command(command)}")
                    .unhook().last()
                    # NOTE(review): .get("additional_info") with no default
                    # yields None if the key is absent, which would raise
                    # TypeError here; the json_schema presumably guarantees the
                    # key — confirm.
                    for info in command_json.get("additional_info")]
                )).unhook()
                .anchor(os_system_context)
                # command_info is the (command, [info-with-output, ...]) tuple
                # produced by the transform above.
                .link(lambda command_info: autodedent(
                    "Include the additional information in the command:",
                    command_info[0],
                    *command_info[1],
                    "to create a final command that can check if this requirement is met:",
                    requirement
                ))
                .pull(json_schema={"command": "string"})
                .transform(lambda command_json: command_json["command"])
                .unhook()
                .anchor(os_system_context)
                # Final verdict: run the finished probe command for real and ask
                # the model whether its output satisfies the requirement.
                .transform(
                    lambda command: Chain("gpt-4-turbo")
                    .anchor(os_system_context)
                    .link(autodedent(
                        f"The user would like to check if this requirement is met: {requirement}",
                        f"The user executes this command: {command}:",
                        "Output:",
                        # Substitute a placeholder so the prompt never contains
                        # a bare empty line where the output should be.
                        (lambda a: a if a else "<empty_response>")(
                            execute_system_command(command)
                        ),
                        f"Does the output indicate that the requirement is met?",
                    ))
                    .pull(json_schema={"is_met": "bool"})
                    .transform(lambda is_met_json: is_met_json["is_met"])
                    .unhook().last()
                )
                .last()
                for requirement in requirement_list
            ])
            .last()
        )
        # all([]) is True, so an empty requirement list lands in the first
        # branch and never reaches the sum()/len() division below.
        if all(possible):
            print("This command should be possible to execute!")
        elif sum(possible) / len(possible) > 0.5:
            print("This command might be possible to execute.")
        else:
            print("This command is not possible to execute.")
            continue
        # ========================================================================== #
        print("Suggesting a command...")
        # Feed the input to flowchat
        command_suggestion = (
            Chain(model="gpt-4-turbo")
            .anchor(os_system_context)
            .link(autodedent(
                "The user wants to do this: ",
                user_input,
                "Suggest a command that can achieve this in one line without user input or interaction."
            )).pull().unhook()
            .anchor(os_system_context)
            # ("desciption" typo lives in the prompt text itself; it is runtime
            # data sent to the model and harmless, so it is left as-is.)
            .link(lambda suggestion: autodedent(
                "Extract ONLY the command from this command desciption:",
                suggestion
            ))
            # define a JSON schema to extract the command from the suggestion
            # NOTE(review): this schema is given by example value rather than a
            # type name like the others ("string") — presumably flowchat accepts
            # example-style schemas; confirm against flowchat docs.
            .pull(json_schema={"command": "echo 'Hello World!'"})
            .transform(lambda command_json: command_json["command"])
            .unhook().last()
        )
        print(f"Suggested command: {command_suggestion}")
        # ========================================================================== #
        # Execute the suggested command and get the result
        command_output = execute_system_command(command_suggestion)
        print(f"Command executed. Output:\n{command_output}")
        # Only ask for an explanation when there is output to explain.
        if command_output != "":
            description = (
                Chain(model="gpt-3.5-turbo").anchor(os_system_context)
                .link(f"Describe this output:\n{command_output}")
                .pull().unhook().last()
            )
            # Logging the description
            print(f"Explanation:\n{description}")
        print("=" * 60)
if __name__ == "__main__":
main()