
chore: use node specify llm to auto generate prompt (#6525)

Joel 9 months ago
parent
commit
dc7335cdf8

+ 10 - 0
web/app/components/app/configuration/config-prompt/simple-prompt-input.tsx

@@ -14,6 +14,7 @@ import PromptEditorHeightResizeWrap from './prompt-editor-height-resize-wrap'
 import cn from '@/utils/classnames'
 import { type PromptVariable } from '@/models/debug'
 import Tooltip from '@/app/components/base/tooltip'
+import type { CompletionParams } from '@/types/app'
 import { AppType } from '@/types/app'
 import { getNewVar, getVars } from '@/utils/var'
 import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
@@ -58,6 +59,7 @@ const Prompt: FC<ISimplePromptInput> = ({
   const { eventEmitter } = useEventEmitterContextContext()
   const {
     modelConfig,
+    completionParams,
     dataSets,
     setModelConfig,
     setPrevPromptConfig,
@@ -247,6 +249,14 @@ const Prompt: FC<ISimplePromptInput> = ({
       {showAutomatic && (
         <GetAutomaticResModal
           mode={mode as AppType}
+          model={
+            {
+              provider: modelConfig.provider,
+              name: modelConfig.model_id,
+              mode: modelConfig.mode,
+              completion_params: completionParams as CompletionParams,
+            }
+          }
           isShow={showAutomatic}
           onClose={showAutomaticFalse}
           onFinished={handleAutomaticRes}
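
This hunk group threads the app's currently selected model and its completion parameters into the auto-generate modal. A minimal sketch of the `model` prop being assembled, with the field mapping taken from the diff (the import and typing follow the new declarations in web/types/app.ts below; the snippet is illustrative, not part of the commit):

    import type { CompletionParams, Model } from '@/types/app'

    // Mirrors the object literal passed to GetAutomaticResModal above.
    const model: Model = {
      provider: modelConfig.provider,   // e.g. 'openai'
      name: modelConfig.model_id,       // the selected model's id
      mode: modelConfig.mode,           // chat or completion
      completion_params: completionParams as CompletionParams,
    }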

+ 11 - 1
web/app/components/app/configuration/config/automatic/get-automatic-res.tsx

@@ -20,6 +20,7 @@ import Button from '@/app/components/base/button'
 import Toast from '@/app/components/base/toast'
 import { generateRule } from '@/service/debug'
 import ConfigPrompt from '@/app/components/app/configuration/config-prompt'
+import type { Model } from '@/types/app'
 import { AppType } from '@/types/app'
 import ConfigVar from '@/app/components/app/configuration/config-var'
 import OpeningStatement from '@/app/components/app/configuration/features/chat-group/opening-statement'
@@ -33,6 +34,7 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
 
 export type IGetAutomaticResProps = {
   mode: AppType
+  model: Model
   isShow: boolean
   onClose: () => void
   onFinished: (res: AutomaticRes) => void
@@ -57,6 +59,7 @@ const TryLabel: FC<{
 
 const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
   mode,
+  model,
   isShow,
   onClose,
   isInLLMNode,
@@ -149,10 +152,17 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
       return
     setLoadingTrue()
     try {
-      const res = await generateRule({
+      const { error, ...res } = await generateRule({
         instruction,
+        model_config: model,
       })
       setRes(res)
+      if (error) {
+        Toast.notify({
+          type: 'error',
+          message: error,
+        })
+      }
     }
     finally {
       setLoadingFalse()
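
Note the error-handling design here: generateRule can report a failure inside a successful response body, so the caller destructures `error` and surfaces it as a toast rather than relying on the catch path. A hedged sketch of the two response shapes the client now expects, per the AutomaticRes change in web/service/debug.ts below (the field values are hypothetical):

    import type { AutomaticRes } from '@/service/debug'

    // Success: generation fields populated, no error.
    const ok: AutomaticRes = {
      prompt: 'You are a helpful writing assistant...',
      variables: ['topic'],
      opening_statement: 'Hi! What shall we write about?',
    }

    // Failure: the body carries an error message, shown via Toast.notify.
    const failed: AutomaticRes = { ...ok, error: 'Provider quota exceeded' }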

+ 4 - 1
web/app/components/workflow/nodes/_base/components/prompt/editor.tsx

@@ -9,6 +9,7 @@ import { useTranslation } from 'react-i18next'
 import { useBoolean } from 'ahooks'
 import { BlockEnum, EditionType } from '../../../../types'
 import type {
+  ModelConfig,
   Node,
   NodeOutPutVar,
   Variable,
@@ -58,6 +59,7 @@ type Props = {
   availableNodes?: Node[]
   isSupportPromptGenerator?: boolean
   onGenerated?: (prompt: string) => void
+  modelConfig?: ModelConfig
   // for jinja
   isSupportJinja?: boolean
   editionType?: EditionType
@@ -90,6 +92,7 @@ const Editor: FC<Props> = ({
   varList = [],
   handleAddVariable,
   onGenerated,
+  modelConfig,
 }) => {
   const { t } = useTranslation()
   const { eventEmitter } = useEventEmitterContextContext()
@@ -130,7 +133,7 @@ const Editor: FC<Props> = ({
             <div className='flex items-center'>
               <div className='leading-[18px] text-xs font-medium text-gray-500'>{value?.length || 0}</div>
               {isSupportPromptGenerator && (
-                <PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} />
+                <PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} modelConfig={modelConfig} />
               )}
 
               <div className='w-px h-3 ml-2 mr-2 bg-gray-200'></div>

+ 4 - 1
web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx

@@ -4,7 +4,7 @@ import React, { useCallback, useEffect, useState } from 'react'
 import { uniqueId } from 'lodash-es'
 import { useTranslation } from 'react-i18next'
 import { RiQuestionLine } from '@remixicon/react'
-import type { PromptItem, Variable } from '../../../types'
+import type { ModelConfig, PromptItem, Variable } from '../../../types'
 import { EditionType } from '../../../types'
 import { useWorkflowStore } from '../../../store'
 import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor'
@@ -38,6 +38,7 @@ type Props = {
   availableNodes: any
   varList: Variable[]
   handleAddVariable: (payload: any) => void
+  modelConfig?: ModelConfig
 }
 
 const roleOptions = [
@@ -77,6 +78,7 @@ const ConfigPromptItem: FC<Props> = ({
   availableNodes,
   varList,
   handleAddVariable,
+  modelConfig,
 }) => {
   const { t } = useTranslation()
   const workflowStore = useWorkflowStore()
@@ -138,6 +140,7 @@ const ConfigPromptItem: FC<Props> = ({
       availableNodes={availableNodes}
       isSupportPromptGenerator={payload.role === PromptRole.system}
       onGenerated={handleGenerated}
+      modelConfig={modelConfig}
       isSupportJinja
       editionType={payload.edition_type}
       onEditionTypeChange={onEditionTypeChange}

+ 5 - 1
web/app/components/workflow/nodes/llm/components/config-prompt.tsx

@@ -5,7 +5,7 @@ import { useTranslation } from 'react-i18next'
 import produce from 'immer'
 import { ReactSortable } from 'react-sortablejs'
 import { v4 as uuid4 } from 'uuid'
-import type { PromptItem, ValueSelector, Var, Variable } from '../../../types'
+import type { ModelConfig, PromptItem, ValueSelector, Var, Variable } from '../../../types'
 import { EditionType, PromptRole } from '../../../types'
 import useAvailableVarList from '../../_base/hooks/use-available-var-list'
 import { useWorkflowStore } from '../../../store'
@@ -33,6 +33,7 @@ type Props = {
   }
   varList?: Variable[]
   handleAddVariable: (payload: any) => void
+  modelConfig: ModelConfig
 }
 
 const ConfigPrompt: FC<Props> = ({
@@ -47,6 +48,7 @@ const ConfigPrompt: FC<Props> = ({
   hasSetBlockStatus,
   varList = [],
   handleAddVariable,
+  modelConfig,
 }) => {
   const { t } = useTranslation()
   const workflowStore = useWorkflowStore()
@@ -199,6 +201,7 @@ const ConfigPrompt: FC<Props> = ({
                           availableNodes={availableNodesWithParent}
                           varList={varList}
                           handleAddVariable={handleAddVariable}
+                          modelConfig={modelConfig}
                         />
                       </div>
                     )
@@ -234,6 +237,7 @@ const ConfigPrompt: FC<Props> = ({
               onEditionTypeChange={handleCompletionEditionTypeChange}
               handleAddVariable={handleAddVariable}
               onGenerated={handleGenerated}
+              modelConfig={modelConfig}
             />
           </div>
         )}

+ 6 - 0
web/app/components/workflow/nodes/llm/components/prompt-generator-btn.tsx

@@ -7,14 +7,19 @@ import { Generator } from '@/app/components/base/icons/src/vender/other'
 import GetAutomaticResModal from '@/app/components/app/configuration/config/automatic/get-automatic-res'
 import { AppType } from '@/types/app'
 import type { AutomaticRes } from '@/service/debug'
+import type { ModelConfig } from '@/app/components/workflow/types'
+import type { Model } from '@/types/app'
+
 type Props = {
   className?: string
   onGenerated?: (prompt: string) => void
+  modelConfig?: ModelConfig
 }
 
 const PromptGeneratorBtn: FC<Props> = ({
   className,
   onGenerated,
+  modelConfig,
 }) => {
   const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
   const handleAutomaticRes = useCallback((res: AutomaticRes) => {
   const handleAutomaticRes = useCallback((res: AutomaticRes) => {
   const handleAutomaticRes = useCallback((res: AutomaticRes) => {
@@ -32,6 +37,7 @@ const PromptGeneratorBtn: FC<Props> = ({
           isShow={showAutomatic}
           onClose={showAutomaticFalse}
           onFinished={handleAutomaticRes}
+          model={modelConfig as Model}
           isInLLMNode
         />
       )}
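
One design note: `model={modelConfig as Model}` casts the workflow node's ModelConfig (from '@/app/components/workflow/types') to the app-level Model type added in web/types/app.ts, so it relies on the two types being structurally compatible. A sketch of that assumption (illustrative, not from the commit):

    import type { ModelConfig } from '@/app/components/workflow/types'
    import type { Model } from '@/types/app'

    // Assumes both types share the { provider, name, mode, completion_params }
    // shape, which is what makes the direct cast in the JSX safe.
    const toModel = (config: ModelConfig): Model => config as Model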

+ 1 - 0
web/app/components/workflow/nodes/llm/panel.tsx

@@ -178,6 +178,7 @@ const Panel: FC<NodePanelProps<LLMNodeType>> = ({
             hasSetBlockStatus={hasSetBlockStatus}
             varList={inputs.prompt_config?.jinja2_variables || []}
             handleAddVariable={handleAddVariable}
+            modelConfig={model}
           />
         )}
 

+ 1 - 0
web/service/debug.ts

@@ -7,6 +7,7 @@ export type AutomaticRes = {
   prompt: string
   variables: string[]
   opening_statement: string
+  error?: string
 }
 
 export const sendChatMessage = async (appId: string, body: Record<string, any>, { onData, onCompleted, onThought, onFile, onError, getAbortController, onMessageEnd, onMessageReplace }: {

+ 56 - 53
web/types/app.ts

@@ -135,9 +135,64 @@ export enum AgentStrategy {
   react = 'react',
 }
 
+export type CompletionParams = {
+  /** Maximum number of tokens in the answer message returned by Completion */
+  max_tokens: number
+  /**
+   * A number between 0 and 2.
+   * The larger the number, the more random the result;
+   * otherwise, the more deterministic.
+   * When in use, choose either `temperature` or `top_p`.
+   * Default is 1.
+   */
+  temperature: number
+  /**
+   * Represents the proportion of probability mass samples to take,
+   * e.g., 0.1 means taking the top 10% probability mass samples.
+   * The determinism between the samples is basically consistent.
+   * Among these results, the `top_p` probability mass results are taken.
+   * When in use, choose either `temperature` or `top_p`.
+   * Default is 1.
+   */
+  top_p: number
+  /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
+  echo: boolean
+  /**
+   * Specify up to 4 to automatically stop generating before the text specified in `stop`.
+   * Suitable for use in chat mode.
+   * For example, specify "Q" and "A",
+   * and provide some Q&A examples as context,
+   * and the model will give out in Q&A format and stop generating before Q&A.
+   */
+  stop: string[]
+  /**
+   * A number between -2.0 and 2.0.
+   * The larger the value, the less the model will repeat topics and the more it will provide new topics.
+   */
+  presence_penalty: number
+  /**
+   * A number between -2.0 and 2.0.
+   * A lower setting will make the model appear less cultured,
+   * always repeating expressions.
+   * The difference between `frequency_penalty` and `presence_penalty`
+   * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
+   * while `presence_penalty` penalizes a word based on its occurrence in the input text.
+   */
+  frequency_penalty: number
+}
 /**
  * Model configuration. The backend type.
  */
+export type Model = {
+  /** LLM provider, e.g., OPENAI */
+  provider: string
+  /** Model name, e.g, gpt-3.5.turbo */
+  name: string
+  mode: ModelModeType
+  /** Default Completion call parameters */
+  completion_params: CompletionParams
+}
+
 export type ModelConfig = {
   opening_statement: string
   suggested_questions?: string[]
@@ -174,59 +229,7 @@ export type ModelConfig = {
     strategy?: AgentStrategy
     tools: ToolItem[]
   }
-  model: {
-    /** LLM provider, e.g., OPENAI */
-    provider: string
-    /** Model name, e.g, gpt-3.5.turbo */
-    name: string
-    mode: ModelModeType
-    /** Default Completion call parameters */
-    completion_params: {
-      /** Maximum number of tokens in the answer message returned by Completion */
-      max_tokens: number
-      /**
-       * A number between 0 and 2.
-       * The larger the number, the more random the result;
-       * otherwise, the more deterministic.
-       * When in use, choose either `temperature` or `top_p`.
-       * Default is 1.
-       */
-      temperature: number
-      /**
-       * Represents the proportion of probability mass samples to take,
-       * e.g., 0.1 means taking the top 10% probability mass samples.
-       * The determinism between the samples is basically consistent.
-       * Among these results, the `top_p` probability mass results are taken.
-       * When in use, choose either `temperature` or `top_p`.
-       * Default is 1.
-       */
-      top_p: number
-      /** When enabled, the Completion Text will concatenate the Prompt content together and return it. */
-      echo: boolean
-      /**
-       * Specify up to 4 to automatically stop generating before the text specified in `stop`.
-       * Suitable for use in chat mode.
-       * For example, specify "Q" and "A",
-       * and provide some Q&A examples as context,
-       * and the model will give out in Q&A format and stop generating before Q&A.
-       */
-      stop: string[]
-      /**
-       * A number between -2.0 and 2.0.
-       * The larger the value, the less the model will repeat topics and the more it will provide new topics.
-       */
-      presence_penalty: number
-      /**
-       * A number between -2.0 and 2.0.
-       * A lower setting will make the model appear less cultured,
-       * always repeating expressions.
-       * The difference between `frequency_penalty` and `presence_penalty`
-       * is that `frequency_penalty` penalizes a word based on its frequency in the training data,
-       * while `presence_penalty` penalizes a word based on its occurrence in the input text.
-       */
-      frequency_penalty: number
-    }
-  }
+  model: Model
   dataset_configs: DatasetConfigs
   file_upload?: {
     image: VisionSettings
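
The CompletionParams extraction above is a pure refactor: the inline completion_params object type moves out of ModelConfig so the new Model type can reuse it. A minimal example object consistent with its doc comments (the values are illustrative, not project defaults):

    import type { CompletionParams } from '@/types/app'

    const params: CompletionParams = {
      max_tokens: 512,        // cap on the generated answer length
      temperature: 1,         // 0..2; use either temperature or top_p, not both
      top_p: 1,               // nucleus sampling: 0.1 keeps the top 10% probability mass
      echo: false,            // when true, the prompt is prepended to the completion text
      stop: ['Q:', 'A:'],     // up to 4 sequences that stop generation early
      presence_penalty: 0,    // -2.0..2.0; higher pushes toward new topics
      frequency_penalty: 0,   // -2.0..2.0; higher penalizes frequently used tokens
    }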