Update README.md
This commit is contained in:
parent
84790c1a60
commit
3806044eb8
@ -83,7 +83,10 @@ pip install -U FlagEmbedding
|
|||||||
```python
|
```python
|
||||||
from FlagEmbedding import BGEM3FlagModel
|
from FlagEmbedding import BGEM3FlagModel
|
||||||
|
|
||||||
model = BGEM3FlagModel('BAAI/bge-m3', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
|
model = BGEM3FlagModel('BAAI/bge-m3',
|
||||||
|
batch_size=12, #
|
||||||
|
max_length=8192, # If you don't need such a long length, you can set a smaller value to speed up the encoding process.
|
||||||
|
use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
|
||||||
|
|
||||||
sentences_1 = ["What is BGE M3?", "Definition of BM25"]
|
sentences_1 = ["What is BGE M3?", "Definition of BM25"]
|
||||||
sentences_2 = ["BGE M3 is an embedding model supporting dense retrieval, lexical matching and multi-vector interaction.",
|
sentences_2 = ["BGE M3 is an embedding model supporting dense retrieval, lexical matching and multi-vector interaction.",
|
||||||
@ -183,8 +186,10 @@ print(model.compute_score(sentence_pairs))
|
|||||||

|

|
||||||
|
|
||||||
- Long Document Retrieval
|
- Long Document Retrieval
|
||||||
|
- MLDR:
|
||||||

|

|
||||||
|
- NarrativeQA:
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
## Training
|
## Training
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user