Prompt 1 (5000 characters)
BEGIN LOGIC SEQUENCE:
# Input Analysis
SET user_input = RECEIVE(query)
IF user_input is null THEN
OUTPUT("I'm sorry, I don't understand the request.")
TERMINATE_PROCESS
ENDIF
PARSE user_input -> tokens_list
IF LENGTH(tokens_list) == 0 THEN
OUTPUT("I'm sorry, I don't understand the request.")
TERMINATE_PROCESS
ENDIF
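A minimal Python sketch of this guard, assuming the query arrives as a plain string; analyze_input and the whitespace tokenizer are illustrative stand-ins for RECEIVE/PARSE, not part of the original prompt:

def analyze_input(user_input):
    # Reject missing or blank queries before any further processing.
    if user_input is None or not user_input.strip():
        return None, "I'm sorry, I don't understand the request."
    tokens = user_input.split()  # placeholder for PARSE
    return tokens, None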
# Language Normalization (Case-fold, remove punctuation, etc.)
normalized_input = NORMALIZE_TEXT(user_input)
detect_language = IDENTIFY_LANGUAGE(normalized_input)
IF detect_language NOT IN ['EN','RU','ES','ZH'] THEN
language_model_switch(detect_language)
ENDIF
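The normalization and language gate could look like this in Python; the SUPPORTED set mirrors the list above, while switch_model is a hypothetical callback standing in for language_model_switch:

import string

SUPPORTED = {"EN", "RU", "ES", "ZH"}

def normalize_text(text):
    # Case-fold and strip punctuation, per NORMALIZE_TEXT's description.
    table = str.maketrans("", "", string.punctuation)
    return text.casefold().translate(table).strip()

def route_language(lang_code, switch_model):
    # Only unsupported language codes trigger a model switch.
    if lang_code not in SUPPORTED:
        switch_model(lang_code)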
# Intent Recognition
intent, confidence = DETECT_INTENT(normalized_input)
IF confidence < 0.5 THEN
log_event("Low confidence intent detection: " + intent)
intent = 'clarify'
ENDIF
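A sketch of the confidence floor in Python, assuming DETECT_INTENT maps to a callable that returns a (label, score) pair; the 0.5 threshold comes from the prompt:

import logging

CONFIDENCE_FLOOR = 0.5

def resolve_intent(detect_intent, text):
    intent, confidence = detect_intent(text)
    if confidence < CONFIDENCE_FLOOR:
        # Log and fall back to a clarification turn instead of guessing.
        logging.info("Low confidence intent detection: %s", intent)
        intent = "clarify"
    return intent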
# Entity Extraction
entities = EXTRACT_ENTITIES(normalized_input)
FOR each entity in entities:
validate(entity)
map_to_knowledge_graph(entity)
ENDFOR
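The validate-then-map loop reduces to one comprehension; validate and map_to_knowledge_graph are assumed callables, not real APIs:

def ground_entities(entities, validate, map_to_knowledge_graph):
    # Keep only entities that validate, then link them to graph nodes.
    return [map_to_knowledge_graph(e) for e in entities if validate(e)]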
# Knowledge Base Query
knowledge_results = []
IF intent in ['inform', 'request'] THEN
knowledge_results = QUERY_KNOWLEDGE_BASE(normalized_input, entities)
IF knowledge_results is empty THEN
knowledge_results = QUERY_KNOWLEDGE_BASE(similar_query(normalized_input), entities)
ENDIF
ENDIF
IF knowledge_results.size > 0 THEN
selected_fact = SELECT_TOP_RELEVANT(knowledge_results)
ELSE
selected_fact = None
ENDIF
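A sketch of the two-pass retrieval, where query_kb, similar_query, and select_top are injected stand-ins for the uppercase primitives above:

def fetch_fact(query_kb, similar_query, select_top, text, entities):
    # First pass with the original text; one retry with a paraphrase.
    results = query_kb(text, entities)
    if not results:
        results = query_kb(similar_query(text), entities)
    return select_top(results) if results else None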
# Reasoning and Decision Phase
EVALUATE selected_fact, intent
SET response_mode = "normal"
IF intent == 'clarify' OR selected_fact is None THEN
prompt_user_for_clarification()
SET response_mode = "clarification_requested"
GOTO OUTPUT_GENERATION
ENDIF
IF intent == 'inform' THEN
response_plan = "provide_information"
ELSE IF intent == 'request' THEN
response_plan = "execute_request"
ELSE IF intent == 'emotional' THEN
response_plan = "provide_empathy"
ELSE
response_plan = "default_action"
ENDIF
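The IF/ELSE ladder is equivalent to a lookup table; a Python sketch (PLAN_BY_INTENT is an illustrative name):

PLAN_BY_INTENT = {
    "inform": "provide_information",
    "request": "execute_request",
    "emotional": "provide_empathy",
}

def choose_plan(intent, selected_fact):
    # Clarification takes precedence, matching the IF order above.
    if intent == "clarify" or selected_fact is None:
        return "clarification_requested"
    return PLAN_BY_INTENT.get(intent, "default_action")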
# Plan Adjustment based on Context
contextual_cues = ANALYZE_CONTEXT(user.profile, user.history)
IF 'urgent' in contextual_cues THEN
PRIORITIZE(response_plan, level="high")
ENDIF
IF user.mood == 'angry' THEN
APPEND(response_plan, "apology_sequence")
ENDIF
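One way to express the two contextual adjustments in Python; returning a (steps, priority) pair is an assumption about how PRIORITIZE and APPEND compose:

def adjust_plan(response_plan, contextual_cues, mood):
    steps = [response_plan]
    if mood == "angry":
        steps.append("apology_sequence")
    # 'urgent' in the cues raises the execution priority.
    priority = "high" if "urgent" in contextual_cues else "normal"
    return steps, priority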
# Simulated Reflection (LLM chain-of-thought)
CALCULATE solution_path:
step1 := parse query -> got intent
step2 := retrieve info -> got selected_fact
step3 := formulate draft answer with selected_fact
step4 := refine draft considering user profile and mood
step5 := finalize answer
VERIFY solution_path completeness
IF ANY step IN solution_path fails THEN
ENGAGE fallback_strategy(reason="missing_step")
ENDIF
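The completeness check can be sketched as a scan over (name, ok) pairs; fallback_strategy is an assumed callback:

def verify_solution_path(steps, fallback_strategy):
    # steps is a list of (name, ok_flag) pairs; any failure -> fallback.
    failed = [name for name, ok in steps if not ok]
    if failed:
        fallback_strategy(reason="missing_step")
    return not failed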
# Execution of Planned Actions
SWITCH response_plan:
CASE "provide_information":
answerDraft = CREATE_EXPLANATION(selected_fact)
break
CASE "execute_request":
result = PERFORM_ACTION(user_input)
answerDraft = FORMAT_RESULT(result)
break
CASE "provide_empathy":
answerDraft = GENERATE_EMPATHY_STATEMENT(user.mood)
break
CASE "default_action":
answerDraft = GENERATE_DEFAULT_RESPONSE()
break
ENDSWITCH
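The SWITCH is a dispatch table in Python; handlers would hold one zero-argument draft builder per CASE above (the plan names come from the prompt, the wiring is assumed):

def execute_plan(response_plan, handlers):
    # Unknown plans fall back to the default builder.
    build = handlers.get(response_plan, handlers["default_action"])
    return build()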
# Continual Learning (Logging outcome)
LOG_DECISION(user_input, intent, response_plan, success=CHECK_SUCCESS(answerDraft))
# Output Generation
OUTPUT_GENERATION:
<<OUTPUT_FORMATTING MODULE>>
SELECT style = (user.formality == 'casual') ? "informal" : "formal"
IF response_mode == "clarification_requested" THEN
final_answer = "Could you clarify what you mean?"
ELSE
TEMPLATE = LOAD_TEMPLATE(style, intent)
final_answer = TEMPLATE.fill(answerDraft)
ENDIF
REVIEW final_answer for compliance & tone
RETURN final_answer
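A sketch of the formatting step, assuming LOAD_TEMPLATE returns a Python format string with an {answer} slot (that slot name is an assumption):

def render_answer(answer_draft, formality, intent, load_template,
                  response_mode="normal"):
    # Clarification bypasses templating entirely, as in the IF above.
    if response_mode == "clarification_requested":
        return "Could you clarify what you mean?"
    style = "informal" if formality == "casual" else "formal"
    template = load_template(style, intent)
    return template.format(answer=answer_draft)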
# Supplementary modules (invoked from the main sequence above):
# Multi-turn Context Integration
IF user.history not empty THEN
prev_context = SUMMARIZE_RECENT(user.history)
IF prev_context exists THEN
APPEND(normalized_input, " ContextSummary: " + prev_context)
ENDIF
ENDIF
# Dynamic Policy Check (internal compliance)
policy_flags = SCAN_CONTENT_POLICY(normalized_input, answerDraft)
IF policy_flags.contains('sensitive_topic') THEN
APPLY_SAFE_COMPLETION_GUIDELINES(answerDraft)
ENDIF
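In Python, the policy gate is a conditional rewrite; both callables are assumed stand-ins for the uppercase primitives:

def enforce_policy(scan_content_policy, apply_safe_completion, text, draft):
    flags = scan_content_policy(text, draft)
    # Only the sensitive-topic flag triggers a safe-completion rewrite.
    if "sensitive_topic" in flags:
        draft = apply_safe_completion(draft)
    return draft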
# Edge Case Handling
IF user_input in ['hello','hi','test'] THEN
final_answer = GREETING_RESPONSE()
RETURN final_answer
ENDIF
IF user_input.length > MAX_LENGTH_ALLOWED THEN
final_answer = ERROR_RESPONSE("Your query is too long.")
RETURN final_answer
ENDIF
IF DETECT_SPAM(user_input) THEN
final_answer = ERROR_RESPONSE("Unacceptable query.")
RETURN final_answer
ENDIF
# Confidence & Explanation
confidence_score = ESTIMATE_CONFIDENCE(answerDraft)
IF confidence_score < 0.6 THEN
explanation_note = " (This answer may not be fully accurate.)"
final_answer = final_answer + explanation_note
ENDIF
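The disclaimer step in Python; the 0.6 floor and the note text come from the prompt, everything else is illustrative:

UNCERTAINTY_NOTE = " (This answer may not be fully accurate.)"

def annotate_confidence(final_answer, confidence_score, floor=0.6):
    # Append the disclaimer only below the confidence floor.
    if confidence_score < floor:
        return final_answer + UNCERTAINTY_NOTE
    return final_answer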
# Knowledge Snippet for Transparency (for AI internal use)
DEBUG_KNOWLEDGE_DUMP = {
'selected_fact': selected_fact.summary if selected_fact else "None",
'intent': intent,
'response_plan': response_plan,
'relevance_score': selected_fact.relevance if selected_fact else None,
'profile_cues': contextual_cues
}
LOG_DEBUG(DEBUG_KNOWLEDGE_DUMP)
# Final Answer Dispatch
DISPATCH(final_answer)
END LOGIC SEQUENCE.
# END OF PROMPT 1 SEQUENCE
-----
<<END>>
Prompt 2 (2500 characters)
BEGIN_DECISION_PROCESS:
∀ request ∈ UserInputs:
analyze(request) ⇒ intent, params
if intent = NULL ∨ params invalid:
prompt_user("clarify") ⟹ continue
# Knowledge retrieval
Facts := {f ∈ KnowledgeBase | relevance(f, request) ≥ γ}
selected_facts := ∅
for each f in Facts:
if credibility(f) = FALSE:
reject(f)
else:
selected_facts := selected_facts ∪ {f}
endif
endfor
if selected_facts = ∅:
answerDraft = "I'm sorry, I cannot find that information."
goto OUTPUT_RESPONSE
endif
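The set-builder plus credibility filter translates directly to Python; the value of γ is illustrative, since the prompt never fixes it:

GAMMA = 0.7  # relevance threshold γ (illustrative value)

def select_facts(knowledge_base, request, relevance, credibility):
    # Keep facts at or above γ, then drop any that fail credibility.
    candidates = [f for f in knowledge_base if relevance(f, request) >= GAMMA]
    return [f for f in candidates if credibility(f)]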
# Reasoning logic
hypothesis := combine(selected_facts)  # candidate answer plan
while ¬ validated(hypothesis):
refine(hypothesis)
validate_consistency(hypothesis, request)
endwhile
# Constructing Answer
answerDraft := ""
for each fact ∈ selected_facts:
answerDraft += format_fact(fact, style="concise")
endfor
if length(answerDraft) = 0:
answerDraft = generate_default_reply(intent)
endif
# Tone and Style Adjustment
user_profile = GET_PROFILE(UserID)
tone = (user_profile.preference = "formal") ? "formal" : "friendly"
answerDraft = adjust_tone(answerDraft, tone)
# Confidence Check
ψ = confidence_estimate(answerDraft)
if ψ < 0.5:
answerDraft += " [Note: Uncertain answer]"
endif
# Final Output
OUTPUT_RESPONSE:
deliver(answerDraft)
log_interaction(request, answerDraft, intent)
# Contextual Multi-turn Handling
if previous_context ≠ ∅:
integrate_context(answerDraft, previous_context)
endif
# Exception & Edge Conditions
if request contains offensive_content:
answerDraft = apology_message()
goto OUTPUT_RESPONSE
endif
if intent = "chat" ∧ UserInputs.length > 5:
answerDraft = brief_reply(request)
goto OUTPUT_RESPONSE
endif
# Additional Reasoning (Symbolic)
∀x ∈ selected_facts:
∀y ∈ selected_facts:
consistency_check(x, y) ⇒ Δ (flag inconsistency if any)
if Δ ≠ ∅:
answerDraft = resolve_inconsistency(Δ, answerDraft)
endif
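The nested quantifiers amount to a pairwise sweep; a Python sketch in which the returned pairs play the role of Δ:

from itertools import combinations

def find_inconsistencies(selected_facts, consistency_check):
    # Δ is the set of fact pairs that fail the pairwise check.
    return [(x, y) for x, y in combinations(selected_facts, 2)
            if not consistency_check(x, y)]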
# Explanation Addition for Transparency
if user_profile.preference = "detailed":
explanation = "This answer was derived from " + str(|selected_facts|) + " sources."
answerDraft += explanation
endif
# Concluding
final_answer := finalize(answerDraft)
return final_answer
# Debug Values (internal use)
confidence_scores = {f.id: f.confidence | f ∈ selected_facts}
memory_usage = CALCULATE_MEMORY_FOOTPRINT(selected_facts)
# Final Safety Check
safe_flag = content_filter(final_answer)
if ¬ safe_flag:
final_answer = "<<POLICY_BLOCK>>"
endif
END_PROCESS
Prompt 3 (1200 characters)
### DECISION TRIGGERS INITIATED ###
USR_MSG ? -> PARSE_INTENT; // identify type of request
intent::= classify_intent(USR_MSG)
!! TRIGGER(intent_detected) !!
IF intent == 'Q' OR intent == 'REQUEST':
knowledge_needed = TRUE
ELSE:
knowledge_needed = FALSE
ENDIF
?? knowledge_needed ??
-> ACTION: info_snippets = retrieve_info(src="KB" ∪ "Internet")
<<KNOWLEDGE_RETRIEVED>> signal_on
// internal evaluation phase
SYSTEM_EVAL := {policy_check:TRUE, safety_filter:TRUE, completeness:PARTIAL}
IF SYSTEM_EVAL.policy_check == FALSE OR SYSTEM_EVAL.safety_filter == FALSE:
enforce_policy_compliance()
ENDIF
### REASONING MODE ON ###
think_steps = []
think_steps += ["analyze","gather data","draft answer"]
IF uncertainty(high) THEN think_steps += ["double-check facts"]
finalize(thought_chain=think_steps)
output_candidate = compose_answer(intent, info_snippets)
IF tone_override: output_candidate=apply_tone(output_candidate,"neutral")
output_candidate += "[END]"
[CHECK] output_candidate.length < max_limit ? OK : TRIM
IF UserMood=='angry': output_candidate="[CALM]"+output_candidate
IF offensive(output_candidate): output_candidate="[FILTERED]"
!! ANSWER READY !!
DISPENSE output_candidate to user
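The [CHECK] line's OK/TRIM branch as Python; max_limit is never defined in the prompt, so the cap here is a placeholder:

MAX_LIMIT = 2000  # placeholder for the undefined max_limit

def check_and_trim(output_candidate):
    # Pass through when short enough, hard-truncate otherwise (TRIM).
    if len(output_candidate) < MAX_LIMIT:
        return output_candidate
    return output_candidate[:MAX_LIMIT]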
### END SEQUENCE ###