Commit 2be29bb1 authored Aug 01, 2023 by jyong
1.add batch add segment 2.support delete segment 3.support un_archive document 4. QA language
parent ceff8edb
Showing 3 changed files with 180 additions and 0 deletions (+180, -0)

api/migrations/versions/2c8af9671032_add_qa_document_language.py  +32  -0
api/tasks/batch_create_segment_to_index_task.py                   +90  -0
api/tasks/delete_segment_from_index_task.py                       +58  -0
api/migrations/versions/2c8af9671032_add_qa_document_language.py
0 → 100644
"""add_qa_document_language
Revision ID: 2c8af9671032
Revises: 8d2d099ceb74
Create Date: 2023-08-01 18:57:27.294973
"""
from
alembic
import
op
import
sqlalchemy
as
sa
# revision identifiers, used by Alembic.
revision
=
'2c8af9671032'
down_revision
=
'8d2d099ceb74'
branch_labels
=
None
depends_on
=
None
def
upgrade
():
# ### commands auto generated by Alembic - please adjust! ###
with
op
.
batch_alter_table
(
'documents'
,
schema
=
None
)
as
batch_op
:
batch_op
.
add_column
(
sa
.
Column
(
'doc_language'
,
sa
.
String
(
length
=
255
),
nullable
=
True
))
# ### end Alembic commands ###
def
downgrade
():
# ### commands auto generated by Alembic - please adjust! ###
with
op
.
batch_alter_table
(
'documents'
,
schema
=
None
)
as
batch_op
:
batch_op
.
drop_column
(
'doc_language'
)
# ### end Alembic commands ###
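The migration itself is plain auto-generated Alembic, adding a nullable doc_language column to the documents table for the QA-language feature. A minimal sketch of applying and reverting it, run from the api/ directory and assuming the Flask-Migrate/Alembic setup this repository appears to use:

# apply all pending migrations, including 2c8af9671032
flask db upgrade

# revert only this revision, dropping documents.doc_language again
flask db downgrade 8d2d099ceb74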
api/tasks/batch_create_segment_to_index_task.py
0 → 100644
import datetime
import logging
import time
import uuid
from typing import Optional, List

import click
from celery import shared_task
from sqlalchemy import func
from werkzeug.exceptions import NotFound

from core.index.index import IndexBuilder
from core.indexing_runner import IndexingRunner
from core.llm.token_calculator import TokenCalculator
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs import helper
from models.dataset import DocumentSegment, Dataset, Document


@shared_task
def batch_create_segment_to_index_task(job_id: str, content: List, dataset_id: str, document_id: str,
                                       tenant_id: str, user_id: str):
    """
    Async batch create segment to index
    :param job_id:
    :param content:
    :param dataset_id:
    :param document_id:
    :param tenant_id:
    :param user_id:

    Usage: batch_create_segment_to_index_task.delay(job_id, content, dataset_id, document_id, tenant_id, user_id)
    """
    logging.info(click.style('Start batch create segment jobId: {}'.format(job_id), fg='green'))
    start_at = time.perf_counter()

    indexing_cache_key = 'segment_batch_import_{}'.format(job_id)

    try:
        dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
        if not dataset:
            raise ValueError('Dataset not exist.')

        dataset_document = db.session.query(Document).filter(Document.id == document_id).first()
        if not dataset_document:
            raise ValueError('Document not exist.')

        if not dataset_document.enabled or dataset_document.archived \
                or dataset_document.indexing_status != 'completed':
            raise ValueError('Document is not available.')

        document_segments = []
        for segment in content:
            content = segment['content']
            answer = segment['answer']
            doc_id = str(uuid.uuid4())
            segment_hash = helper.generate_text_hash(content)

            # calculate embedding tokens used
            tokens = TokenCalculator.get_num_tokens('text-embedding-ada-002', content)
            max_position = db.session.query(func.max(DocumentSegment.position)).filter(
                DocumentSegment.document_id == dataset_document.id
            ).scalar()
            segment_document = DocumentSegment(
                tenant_id=tenant_id,
                dataset_id=dataset_id,
                document_id=document_id,
                index_node_id=doc_id,
                index_node_hash=segment_hash,
                position=max_position + 1 if max_position else 1,
                content=content,
                word_count=len(content),
                tokens=tokens,
                created_by=user_id,
                indexing_at=datetime.datetime.utcnow(),
                status='completed',
                completed_at=datetime.datetime.utcnow()
            )
            if dataset_document.doc_form == 'qa_model':
                segment_document.answer = answer

            db.session.add(segment_document)
            document_segments.append(segment_document)

        # add index to db
        indexing_runner = IndexingRunner()
        indexing_runner.batch_add_segments(document_segments, dataset)
        db.session.commit()
        redis_client.setex(indexing_cache_key, 600, 'completed')
        end_at = time.perf_counter()
        logging.info(click.style('Segment batch created job: {} latency: {}'.format(job_id, end_at - start_at), fg='green'))
    except Exception as e:
        logging.exception("Segments batch created index failed:{}".format(str(e)))
        redis_client.setex(indexing_cache_key, 600, 'error')
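For orientation, a hedged sketch of how a caller might dispatch this task. The payload shape (a list of dicts with 'content' and 'answer' keys) follows what the loop above reads; the UUID placeholders and example strings are illustrative, not taken from this commit:

import uuid

from tasks.batch_create_segment_to_index_task import batch_create_segment_to_index_task

# Each entry needs both keys: the loop reads segment['answer'] unconditionally,
# even though the value is only persisted when doc_form == 'qa_model'.
segments = [
    {'content': 'What is a dataset?', 'answer': 'A collection of indexed documents.'},
    {'content': 'What is a segment?', 'answer': 'One indexed chunk of a document.'},
]

job_id = str(uuid.uuid4())
batch_create_segment_to_index_task.delay(
    job_id,
    segments,
    '<dataset-uuid>',   # placeholder dataset_id
    '<document-uuid>',  # placeholder document_id
    '<tenant-uuid>',    # placeholder tenant_id
    '<user-uuid>',      # placeholder user_id
)

# Progress can then be polled via the redis key the task writes:
# 'segment_batch_import_{job_id}' is set to 'completed' or 'error' for 600s.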
api/tasks/delete_segment_from_index_task.py
0 → 100644
import logging
import time

import click
from celery import shared_task
from werkzeug.exceptions import NotFound

from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment, Dataset, Document


@shared_task
def delete_segment_from_index_task(segment_id: str, index_node_id: str, dataset_id: str, document_id: str):
    """
    Async Remove segment from index
    :param segment_id:
    :param index_node_id:
    :param dataset_id:
    :param document_id:

    Usage: delete_segment_from_index_task.delay(segment_id, index_node_id, dataset_id, document_id)
    """
    logging.info(click.style('Start delete segment from index: {}'.format(segment_id), fg='green'))
    start_at = time.perf_counter()
    indexing_cache_key = 'segment_{}_delete_indexing'.format(segment_id)
    try:
        dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
        if not dataset:
            logging.info(click.style('Segment {} has no dataset, pass.'.format(segment_id), fg='cyan'))
            return

        dataset_document = db.session.query(Document).filter(Document.id == document_id).first()
        if not dataset_document:
            logging.info(click.style('Segment {} has no document, pass.'.format(segment_id), fg='cyan'))
            return

        if not dataset_document.enabled or dataset_document.archived \
                or dataset_document.indexing_status != 'completed':
            logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment_id), fg='cyan'))
            return

        vector_index = IndexBuilder.get_index(dataset, 'high_quality')
        kw_index = IndexBuilder.get_index(dataset, 'economy')

        # delete from vector index
        if vector_index:
            vector_index.delete_by_ids([index_node_id])

        # delete from keyword index
        kw_index.delete_by_ids([index_node_id])

        end_at = time.perf_counter()
        logging.info(click.style('Segment deleted from index: {} latency: {}'.format(segment_id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("delete segment from index failed")
    finally:
        redis_client.delete(indexing_cache_key)
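Dispatch for the delete task is symmetrical. A hedged sketch with placeholder IDs; the assumption that the service layer sets the 'segment_{id}_delete_indexing' key before dispatch is an inference from the key name (this task only deletes it in its finally block) and is not shown in this commit:

from extensions.ext_redis import redis_client
from tasks.delete_segment_from_index_task import delete_segment_from_index_task

# Presumably set by the caller to mark the segment as mid-deletion; the task
# clears it in its finally block whether index cleanup succeeded or not.
# (Assumption based on the key name, not confirmed by this commit.)
redis_client.setex('segment_{}_delete_indexing'.format('<segment-uuid>'), 600, 1)

delete_segment_from_index_task.delay(
    '<segment-uuid>',     # segment_id: used for logging and the cache key
    '<index-node-uuid>',  # index_node_id: the node actually removed from both indexes
    '<dataset-uuid>',
    '<document-uuid>',
)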