Quickstart
Minimal examples for G2P and waveform phoneme ASR.
G2P

Python

from hama import G2PModel
model = G2PModel()
result = model.predict(
"Really? What's the orbital velocity of the moon?",
preserve_literals="punct",
)
print(result.ipa)
print(result.display_ipa)

Node.js

import { G2PNodeModel } from "hama-js/g2p";
const model = await G2PNodeModel.create();
const result = await model.predict(
"Really? What's the orbital velocity of the moon?",
{ preserveLiterals: "punct" },
);
console.log(result.ipa);
console.log(result.displayIpa);

Browser

import { G2PBrowserModel } from "hama-js/g2p/browser";
const model = await G2PBrowserModel.create();
const result = await model.predict("안녕하세요", {
preserveLiterals: "punct",
});
console.log(result.ipa);
console.log(result.displayIpa);

ASR

Python
from hama import ASRModel
model = ASRModel()
result = model.transcribe_file("sample.wav")
print(result.phoneme_text)
print(result.word_phoneme_text)

Node.js

import { ASRNodeModel } from "hama-js/asr";
const model = await ASRNodeModel.create();
const result = await model.transcribeWavFile("sample.wav");
console.log(result.phonemeText);

Browser

import { ASRBrowserModel } from "hama-js/asr/browser";
const model = await ASRBrowserModel.create({
modelUrl: "/assets/asr_waveform_fp16.onnx",
});
const result = await model.transcribeWaveform(float32Samples, 16000);
console.log(result.phonemeText);

For full signatures, options, and return shapes, see APIs.