minersunion committed
Commit a5e0fdc · 1 Parent(s): 23ab49d

adjusted preview

Files changed (3):
  1. README.md +8 -0
  2. conversations_train.parquet +0 -3
  3. dataset.py +24 -89
README.md CHANGED
@@ -22,6 +22,14 @@ You can find more about our subnet on GitHub [here](https://github.com/afterpart
 
 ---
 
+## Full Vectors Access
+
+➡️ **Download the full, uncompressed conversation tag embeddings** from [here](https://huggingface.co/datasets/ReadyAi/5000-podcast-conversations-with-metadata-and-embedding-dataset/tree/main/data)
+
+Intended for large-scale processing and fine-tuning.
+
+---
+
 ## 📋 Dataset Overview
 
 This dataset contains **annotated conversation transcripts** with:
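
The embedding shards under `data/` can also be pulled without the loading script. A minimal sketch, assuming the `bittensor-conversational-tags-and-embeddings-part-*.parquet` naming from the previous revision of `dataset.py` (the `-00` suffix is a hypothetical shard name) and that `huggingface_hub`, `pandas`, and `pyarrow` are installed:

```python
# Sketch: fetch one embedding shard directly from the dataset repo and inspect it.
import pandas as pd
from huggingface_hub import hf_hub_download

REPO_ID = "ReadyAi/5000-podcast-conversations-with-metadata-and-embedding-dataset"

# Download a single shard (cached locally by huggingface_hub).
# The filename is an assumption based on the old glob pattern; adjust it to
# whatever actually exists under data/ in the repo.
path = hf_hub_download(
    repo_id=REPO_ID,
    repo_type="dataset",
    filename="data/bittensor-conversational-tags-and-embeddings-part-00.parquet",
)

df = pd.read_parquet(path)
print(df.columns.tolist())        # expected per the old "full" config: c_guid, tag_id, tag, vector
print(len(df["vector"].iloc[0]))  # embedding dimensionality
```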
conversations_train.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9477b7504b3e6efa6911eff198e86cd1cc7262e0e1ef39db78b26041cd5e8b31
-size 149841685
dataset.py CHANGED
@@ -1,103 +1,38 @@
-import glob
-
-import datasets
 import pandas as pd
 from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Sequence, Split, SplitGenerator, Value
 
 
 class PodcastConversationsWithMetadataAndEmbedding(GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="conversations", version=datasets.Version("1.0.0"), description="Transcripts with speaker metadata"),
-        datasets.BuilderConfig(name="full", version=datasets.Version("1.0.0"), description="Tag embeddings for all conversations"),
-        datasets.BuilderConfig(name="conversations_to_tags", version=datasets.Version("1.0.0"), description="Tag predictions with similarity scores"),
-        datasets.BuilderConfig(name="tag_to_id", version=datasets.Version("1.0.0"), description="Dictionary of tag IDs"),
-    ]
-
     def _info(self):
-        if self.config.name == "conversations":
-            features = Features(
+        return DatasetInfo(
+            features=Features(
                 {
                     "c_guid": Value("string"),
                     "participants": Sequence(Value("string")),
-                    "transcript": Sequence(
-                        {
-                            "chunk": Value("string"),
-                            "speaker": Value("string"),
-                            "text": Value("string"),
-                        }
-                    ),
-                }
-            )
-        elif self.config.name == "full":
-            features = Features(
-                {
-                    "c_guid": Value("string"),
-                    "tag_id": Value("int64"),
-                    "tag": Value("string"),
-                    "vector": Sequence(Value("float32")),
-                }
-            )
-        elif self.config.name == "conversations_to_tags":
-            features = Features({"c_guid": Value("string"), "tag_id": Value("int64"), "tag": Value("string")})
-        elif self.config.name == "tag_to_id":
-            features = Features(
-                {
-                    "tag_id": Value("int64"),
-                    "tag": Value("string"),
+                    "transcript": Sequence({"chunk": Value("string"), "speaker": Value("string"), "text": Value("string")}),
                 }
             )
-        else:
-            raise ValueError(f"Unknown config name: {self.config.name}")
-
-        return DatasetInfo(
-            features=features,
-            description=f"Split for {self.config.name} of the podcast conversation dataset.",
         )
 
     def _split_generators(self, dl_manager):
-        if self.config.name == "conversations":
-            return [
-                SplitGenerator(
-                    name=Split.TRAIN,
-                    gen_kwargs={"filepaths": ["conversations_train.parquet"]},
-                )
-            ]
-        elif self.config.name == "full":
-            return [
-                SplitGenerator(
-                    name=Split.TRAIN,
-                    gen_kwargs={"filepaths": sorted(glob.glob("data/bittensor-conversational-tags-and-embeddings-part-*.parquet"))},
-                ),
-            ]
-        elif self.config.name == "conversations_to_tags":
-            return [
-                SplitGenerator(
-                    name=Split.TRAIN,
-                    gen_kwargs={"filepaths": ["conversations_to_tags.parquet"]},
-                ),
-            ]
-        elif self.config.name == "tag_to_id":
-            return [
-                SplitGenerator(
-                    name=Split.TRAIN,
-                    gen_kwargs={"filepaths": ["tag_to_id.parquet"]},
-                ),
-            ]
-        else:
-            raise ValueError(f"Unknown config name: {self.config.name}")
-
-    def _generate_examples(self, filepaths):
-        for path in filepaths:
-            df = pd.read_parquet(path)
-
-            for idx, row in df.iterrows():
-                record = row.to_dict()
-
-                # Normalize vector field if it exists
-                if "vector" in record:
-                    if isinstance(record["vector"], (list, tuple)):
-                        record["vector"] = list(map(float, record["vector"]))
-                    else:
-                        record["vector"] = []
-
-                yield f"{path}-{idx}", record
+        return [
+            SplitGenerator(
+                name=Split.TRAIN,
+                gen_kwargs={"filepath": "conversations.parquet"},
+            ),
+        ]
+
+    def _generate_examples(self, filepath):
+        df = pd.read_parquet(filepath)
+
+        for idx, row in df.iterrows():
+            record = row.to_dict()
+
+            # Normalize vector field if it exists
+            if "vector" in record:
+                if isinstance(record["vector"], (list, tuple)):
+                    record["vector"] = list(map(float, record["vector"]))
+                else:
+                    record["vector"] = []
+
+            yield idx, record
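
After this change the script exposes a single default config, so loading the preview reduces to one `load_dataset` call. A minimal sketch, assuming the script ships with the repo and a `datasets` version that still executes repository loading scripts (recent versions require `trust_remote_code=True` for that):

```python
# Sketch: load the preview conversations through the trimmed loading script.
from datasets import load_dataset

ds = load_dataset(
    "ReadyAi/5000-podcast-conversations-with-metadata-and-embedding-dataset",
    split="train",
    trust_remote_code=True,
)

row = ds[0]
print(row["c_guid"], row["participants"])

# Note: Sequence({...}) features decode as a dict of lists, so transcript
# fields are accessed column-wise rather than as a list of dicts.
print(row["transcript"]["speaker"][:3])
print(row["transcript"]["text"][:3])
```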