Skip to content

Commit b825987

Browse files
authored
[Examples] Better error handling and more robust output handling (second-state#47)
* Better error handling and more robust output handling
* Fix GitHub Actions issues
* Update llama.yml

Signed-off-by: Michael Yuan <michael@secondstate.io>
1 parent 316f02d commit b825987

File tree

5 files changed

+13
-8
lines changed

5 files changed

+13
-8
lines changed

‎.github/workflows/llama.yml‎

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,10 @@ jobs:
3030

3131
- name: Install apt-get packages
3232
run: |
33+
echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc
3334
sudo ACCEPT_EULA=Y apt-get update
3435
sudo ACCEPT_EULA=Y apt-get upgrade
35-
sudo apt-get install wget git curl software-properties-common build-essential
36+
sudo apt-get install wget git curl software-properties-common build-essential libopenblas-dev
3637
3738
- name: Install Rust target for wasm
3839
run: |

‎.github/workflows/pytorch.yml‎

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ jobs:
3030

3131
- name: Install apt-get packages
3232
run: |
33+
echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc
3334
sudo ACCEPT_EULA=Y apt-get update
3435
sudo ACCEPT_EULA=Y apt-get upgrade
3536
sudo apt-get install wget git curl software-properties-common build-essential

‎.github/workflows/tflite.yml‎

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ jobs:
3030

3131
- name: Install apt-get packages
3232
run: |
33+
echo RESET grub-efi/install_devices | sudo debconf-communicate grub-pc
3334
sudo ACCEPT_EULA=Y apt-get update
3435
sudo ACCEPT_EULA=Y apt-get upgrade
3536
sudo apt-get install wget git curl software-properties-common build-essential

‎wasmedge-ggml-llama/src/main.rs‎

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -9,25 +9,27 @@ fn main() {
99
let graph =
1010
wasi_nn::GraphBuilder::new(wasi_nn::GraphEncoding::Ggml, wasi_nn::ExecutionTarget::AUTO)
1111
.build_from_cache(model_name)
12-
.unwrap();
12+
.expect("Failed to load the model");
1313
println!("Loaded model into wasi-nn with ID: {:?}", graph);
1414

15-
let mut context = graph.init_execution_context().unwrap();
15+
let mut context = graph.init_execution_context().expect("Failed to init context");
1616
println!("Created wasi-nn execution context with ID: {:?}", context);
1717

1818
let tensor_data = prompt.as_bytes().to_vec();
1919
println!("Read input tensor, size in bytes: {}", tensor_data.len());
2020
context
2121
.set_input(0, wasi_nn::TensorType::U8, &[1], &tensor_data)
22-
.unwrap();
22+
.expect("Failed to set prompt as the input tensor");
2323

2424
// Execute the inference.
25-
context.compute().unwrap();
25+
context.compute().expect("Failed to complete inference");
2626
println!("Executed model inference");
2727

2828
// Retrieve the output.
29-
let mut output_buffer = vec![0u8; 1000];
30-
context.get_output(0, &mut output_buffer).unwrap();
31-
let output = String::from_utf8(output_buffer.clone()).unwrap();
29+
let max_output_size = 4096*6;
30+
let mut output_buffer = vec![0u8; max_output_size];
31+
let mut output_size = context.get_output(0, &mut output_buffer).expect("Failed to get inference output");
32+
output_size = std::cmp::min(max_output_size, output_size);
33+
let output = String::from_utf8_lossy(&output_buffer[..output_size]).to_string();
3234
println!("Output: {}", output);
3335
}
466 Bytes
Binary file not shown.

0 commit comments

Comments (0)