# Triton backend to use
name: "stablelm-2-1_6b"
backend: "python"
max_batch_size: 0

# Triton should expect as input a single string
# input of variable length named 'text_input'
input [
  {
    name: "text_input"
    data_type: TYPE_STRING
    dims: [ -1 ]
  },
  {
    name: "max_length"
    data_type: TYPE_INT32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "max_new_tokens"
    data_type: TYPE_INT32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "do_sample"
    data_type: TYPE_BOOL
    dims: [ 1 ]
    optional: true
  },
  {
    name: "top_k"
    data_type: TYPE_INT32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "top_p"
    data_type: TYPE_FP32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "temperature"
    data_type: TYPE_FP32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "repetition_penalty"
    data_type: TYPE_FP32
    dims: [ 1 ]
    optional: true
  },
  {
    name: "stream"
    data_type: TYPE_BOOL
    dims: [ 1 ]
    optional: true
  }
]

# Triton should expect to respond with a single string
# output of variable length named 'text_output'
output [
  {
    name: "text_output"
    data_type: TYPE_STRING
    dims: [ -1 ]
  }
]

parameters: [
  {
    key: "model_path",
    value: {string_value: "/cheetah/input/model/groupuser/stablelm-2-1_6b"}
  },
  {
    key: "enable_inference_trace",
    value: {string_value: "True"}
  }
]

instance_group [
  {
    kind: KIND_GPU,
    count: 1
  }
]
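The configuration above only declares the model's interface; the sketch below shows, under stated assumptions, how a client request could map onto these inputs and outputs. It uses the standard `tritonclient` Python HTTP client and assumes the server is reachable at `localhost:8000`; the prompt text and the `max_new_tokens` value are illustrative and not taken from the original configuration.

```python
# Minimal client sketch for the config above (assumptions: Triton is reachable
# at localhost:8000 and the `tritonclient[http]` package is installed).
import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")

# 'text_input' is declared as TYPE_STRING with dims [ -1 ]; the client sends
# strings as the BYTES datatype in a numpy object array.
prompt = np.array(["What is machine learning?"], dtype=object)  # illustrative prompt
text_input = httpclient.InferInput("text_input", prompt.shape, "BYTES")
text_input.set_data_from_numpy(prompt)

# Optional inputs such as 'max_new_tokens' (TYPE_INT32, dims [ 1 ]) can be
# attached the same way; omitted optional inputs are left to the backend's defaults.
max_new_tokens = np.array([128], dtype=np.int32)  # illustrative value
mnt_input = httpclient.InferInput("max_new_tokens", max_new_tokens.shape, "INT32")
mnt_input.set_data_from_numpy(max_new_tokens)

# Request the single declared output, 'text_output' (TYPE_STRING, dims [ -1 ]).
outputs = [httpclient.InferRequestedOutput("text_output")]

result = client.infer(
    model_name="stablelm-2-1_6b",
    inputs=[text_input, mnt_input],
    outputs=outputs,
)
print(result.as_numpy("text_output"))
```

Note that how optional inputs such as `stream`, `top_k`, or `temperature` are actually interpreted depends on the Python backend's `model.py`, which is not shown here.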