Commit aea0f18

Remove MGET references
1 parent 93226d0 commit aea0f18

3 files changed: +82 -113 lines changed

osscluster.go

Lines changed: 7 additions & 7 deletions
@@ -1676,15 +1676,15 @@ func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) err
 func (c *ClusterClient) slottedKeyedCommands(ctx context.Context, cmds []Cmder) map[int][]Cmder {
 	cmdsSlots := map[int][]Cmder{}

-	preferredRandomSlot := -1
+	prefferedRandomSlot := -1
 	for _, cmd := range cmds {
 		if cmdFirstKeyPos(cmd) == 0 {
 			continue
 		}

-		slot := c.cmdSlot(cmd, preferredRandomSlot)
-		if preferredRandomSlot == -1 {
-			preferredRandomSlot = slot
+		slot := c.cmdSlot(cmd, prefferedRandomSlot)
+		if prefferedRandomSlot == -1 {
+			prefferedRandomSlot = slot
 		}

 		cmdsSlots[slot] = append(cmdsSlots[slot], cmd)
@@ -2077,10 +2077,10 @@ func (c *ClusterClient) cmdSlot(cmd Cmder, prefferedSlot int) int {
 	return cmdSlot(cmd, cmdFirstKeyPos(cmd), prefferedSlot)
 }

-func cmdSlot(cmd Cmder, pos int, preferredRandomSlot int) int {
+func cmdSlot(cmd Cmder, pos int, prefferedRandomSlot int) int {
 	if pos == 0 {
-		if preferredRandomSlot != -1 {
-			return preferredRandomSlot
+		if prefferedRandomSlot != -1 {
+			return prefferedRandomSlot
 		}
 		return hashtag.RandomSlot()
 	}
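
As context for the rename above: slottedKeyedCommands buckets pipeline commands by hash slot, and cmdSlot lets a command without a key reuse a previously chosen slot instead of picking a fresh random one each time. The sketch below is a minimal, self-contained illustration of that pattern, not the go-redis implementation; the command type, slotForKey helper, and slotCount constant are hypothetical stand-ins for Cmder, hashtag.Slot, and the 16384-slot cluster key space, and the two functions are folded into one grouping routine purely for illustration.

package main

import (
	"fmt"
	"math/rand"
)

// command is a hypothetical stand-in for go-redis' Cmder.
type command struct {
	name string
	key  string // empty means the command carries no key (first key position == 0)
}

const slotCount = 16384 // size of the Redis Cluster key space

// slotForKey is a placeholder for hashtag.Slot; real clients use CRC16 over the hash tag.
func slotForKey(key string) int {
	h := 0
	for _, b := range []byte(key) {
		h = (h*31 + int(b)) % slotCount
	}
	return h
}

// slotFor mirrors the shape of cmdSlot: keyed commands hash their key, keyless
// commands reuse the previously chosen slot, falling back to a random one.
func slotFor(cmd command, preferredRandomSlot int) int {
	if cmd.key == "" {
		if preferredRandomSlot != -1 {
			return preferredRandomSlot
		}
		return rand.Intn(slotCount) // stands in for hashtag.RandomSlot()
	}
	return slotForKey(cmd.key)
}

// groupBySlot mirrors the shape of slottedKeyedCommands: the first slot chosen
// becomes the preferred slot for any later command that has no key of its own.
func groupBySlot(cmds []command) map[int][]command {
	buckets := map[int][]command{}
	preferredRandomSlot := -1
	for _, cmd := range cmds {
		slot := slotFor(cmd, preferredRandomSlot)
		if preferredRandomSlot == -1 {
			preferredRandomSlot = slot
		}
		buckets[slot] = append(buckets[slot], cmd)
	}
	return buckets
}

func main() {
	cmds := []command{
		{name: "GET", key: "user:1"},
		{name: "PING"}, // no key: follows whatever slot was chosen first
		{name: "GET", key: "user:2"},
	}
	for slot, group := range groupBySlot(cmds) {
		fmt.Printf("slot %d -> %d command(s)\n", slot, len(group))
	}
}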

osscluster_router.go

Lines changed: 4 additions & 35 deletions
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
-	"strings"
 	"sync"
 	"time"

@@ -352,34 +351,10 @@ func (c *ClusterClient) aggregateMultiSlotResults(ctx context.Context, cmd Cmder
 			firstErr = result.err
 		}
 		if result.cmd != nil && result.err == nil {
-			// For MGET, extract individual values from the array result
-			if strings.ToLower(cmd.Name()) == "mget" {
-				if sliceCmd, ok := result.cmd.(*SliceCmd); ok {
-					values := sliceCmd.Val()
-					err := sliceCmd.Err()
-					if len(values) == len(result.keys) {
-						for i, key := range result.keys {
-							keyedResults[key] = routing.AggregatorResErr{Result: values[i], Err: err}
-						}
-					} else {
-						// Fallback: map all keys to the entire result
-						for _, key := range result.keys {
-							keyedResults[key] = routing.AggregatorResErr{Result: values, Err: err}
-						}
-					}
-				} else {
-					// Fallback for non-SliceCmd results
-					value, err := ExtractCommandValue(result.cmd)
-					for _, key := range result.keys {
-						keyedResults[key] = routing.AggregatorResErr{Result: value, Err: err}
-					}
-				}
-			} else {
-				// For other commands, map each key to the entire result
-				value, err := ExtractCommandValue(result.cmd)
-				for _, key := range result.keys {
-					keyedResults[key] = routing.AggregatorResErr{Result: value, Err: err}
-				}
+			// For other commands, map each key to the entire result
+			value, err := ExtractCommandValue(result.cmd)
+			for _, key := range result.keys {
+				keyedResults[key] = routing.AggregatorResErr{Result: value, Err: err}
 			}
 		}
 	}
@@ -450,12 +425,6 @@ func (c *ClusterClient) aggregateResponses(cmd Cmder, cmds []Cmder, policy *rout

 // createAggregator creates the appropriate response aggregator
 func (c *ClusterClient) createAggregator(policy *routing.CommandPolicy, cmd Cmder, isKeyed bool) routing.ResponseAggregator {
-	cmdName := strings.ToLower(cmd.Name())
-	// For MGET without policy, use keyed aggregator
-	if cmdName == "mget" {
-		return routing.NewDefaultAggregator(true)
-	}
-
 	if policy != nil {
 		return routing.NewResponseAggregator(policy.Response, cmd.Name())
 	}
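
With the MGET branch removed, aggregateMultiSlotResults treats every multi-slot keyed command the same way: each per-slot sub-command yields one extracted value, and that value is attached to every key that was routed with it (per-element splitting of MGET replies no longer happens here). A rough, self-contained sketch of that aggregation shape follows; the resErr and slotResult types and the aggregateKeyed function are hypothetical stand-ins for routing.AggregatorResErr and the library's internal per-slot result handling, not its API.

package main

import "fmt"

// resErr pairs a result with its error, loosely like routing.AggregatorResErr.
type resErr struct {
	Result interface{}
	Err    error
}

// slotResult is a stand-in for one per-slot outcome: the keys that were routed
// together plus the single value extracted from that sub-command's reply.
type slotResult struct {
	keys  []string
	value interface{}
	err   error
}

// aggregateKeyed mirrors the simplified loop kept by this commit: record the
// first error, and map every key of a successful sub-command to its whole value.
func aggregateKeyed(results []slotResult) (map[string]resErr, error) {
	keyed := make(map[string]resErr)
	var firstErr error
	for _, r := range results {
		if r.err != nil && firstErr == nil {
			firstErr = r.err
		}
		if r.err == nil {
			for _, key := range r.keys {
				keyed[key] = resErr{Result: r.value, Err: r.err}
			}
		}
	}
	return keyed, firstErr
}

func main() {
	// e.g. DEL fanned out to two shards: each shard reports how many keys it removed.
	results := []slotResult{
		{keys: []string{"a", "b"}, value: int64(2)},
		{keys: []string{"c"}, value: int64(1)},
	}
	keyed, err := aggregateKeyed(results)
	fmt.Println(keyed, err)
}

Likewise, createAggregator no longer special-cases MGET and now relies on the command's declared response policy (or the default aggregator) to combine these keyed results.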

osscluster_test.go

Lines changed: 71 additions & 71 deletions
@@ -2262,79 +2262,79 @@ var _ = Describe("Command Tips tests", func() {
 			Expect(err).NotTo(HaveOccurred())
 			Expect(len(masterNodes)).To(BeNumerically(">", 1))

-			// MGET command aggregation across multiple keys on different shards - verify all_succeeded policy with keyed aggregation
-			testData := map[string]string{
-				"mget_test_key_1111": "value1",
-				"mget_test_key_2222": "value2",
-				"mget_test_key_3333": "value3",
-				"mget_test_key_4444": "value4",
-				"mget_test_key_5555": "value5",
-			}
-
-			keyLocations := make(map[string]string)
-			for key, value := range testData {
-
-				result := client.Set(ctx, key, value, 0)
-				Expect(result.Err()).NotTo(HaveOccurred())
-
-				for _, node := range masterNodes {
-					getResult := node.client.Get(ctx, key)
-					if getResult.Err() == nil && getResult.Val() == value {
-						keyLocations[key] = node.addr
-						break
-					}
-				}
-			}
-
-			shardsUsed := make(map[string]bool)
-			for _, shardAddr := range keyLocations {
-				shardsUsed[shardAddr] = true
-			}
-			Expect(len(shardsUsed)).To(BeNumerically(">", 1))
-
-			keys := make([]string, 0, len(testData))
-			expectedValues := make([]interface{}, 0, len(testData))
-
-			for key, value := range testData {
-				keys = append(keys, key)
-				expectedValues = append(expectedValues, value)
-			}
-
-			mgetResult := client.MGet(ctx, keys...)
-			Expect(mgetResult.Err()).NotTo(HaveOccurred())
-
-			actualValues := mgetResult.Val()
-			Expect(len(actualValues)).To(Equal(len(keys)))
-			Expect(actualValues).To(ConsistOf(expectedValues))
-
-			// Verify all values are correctly aggregated
-			for i, key := range keys {
-				expectedValue := testData[key]
-				actualValue := actualValues[i]
-				Expect(actualValue).To(Equal(expectedValue))
-			}
+			// // MGET command aggregation across multiple keys on different shards - verify all_succeeded policy with keyed aggregation
+			// testData := map[string]string{
+			// "mget_test_key_1111": "value1",
+			// "mget_test_key_2222": "value2",
+			// "mget_test_key_3333": "value3",
+			// "mget_test_key_4444": "value4",
+			// "mget_test_key_5555": "value5",
+			// }
+
+			// keyLocations := make(map[string]string)
+			// for key, value := range testData {
+
+			// result := client.Set(ctx, key, value, 0)
+			// Expect(result.Err()).NotTo(HaveOccurred())
+
+			// for _, node := range masterNodes {
+			// getResult := node.client.Get(ctx, key)
+			// if getResult.Err() == nil && getResult.Val() == value {
+			// keyLocations[key] = node.addr
+			// break
+			// }
+			// }
+			// }
+
+			// shardsUsed := make(map[string]bool)
+			// for _, shardAddr := range keyLocations {
+			// shardsUsed[shardAddr] = true
+			// }
+			// Expect(len(shardsUsed)).To(BeNumerically(">", 1))
+
+			// keys := make([]string, 0, len(testData))
+			// expectedValues := make([]interface{}, 0, len(testData))
+
+			// for key, value := range testData {
+			// keys = append(keys, key)
+			// expectedValues = append(expectedValues, value)
+			// }
+
+			// mgetResult := client.MGet(ctx, keys...)
+			// Expect(mgetResult.Err()).NotTo(HaveOccurred())
+
+			// actualValues := mgetResult.Val()
+			// Expect(len(actualValues)).To(Equal(len(keys)))
+			// Expect(actualValues).To(ConsistOf(expectedValues))
+
+			// // Verify all values are correctly aggregated
+			// for i, key := range keys {
+			// expectedValue := testData[key]
+			// actualValue := actualValues[i]
+			// Expect(actualValue).To(Equal(expectedValue))
+			// }

 			// DEL command aggregation across multiple keys on different shards
-			delResult := client.Del(ctx, keys...)
-			Expect(delResult.Err()).NotTo(HaveOccurred())
-
-			deletedCount := delResult.Val()
-			Expect(deletedCount).To(Equal(int64(len(keys))))
-
-			// Verify keys are actually deleted from their respective shards
-			for key, shardAddr := range keyLocations {
-				var targetNode *masterNode
-				for i := range masterNodes {
-					if masterNodes[i].addr == shardAddr {
-						targetNode = &masterNodes[i]
-						break
-					}
-				}
-				Expect(targetNode).NotTo(BeNil())
-
-				getResult := targetNode.client.Get(ctx, key)
-				Expect(getResult.Err()).To(HaveOccurred())
-			}
+			// delResult := client.Del(ctx, keys...)
+			// Expect(delResult.Err()).NotTo(HaveOccurred())
+
+			// deletedCount := delResult.Val()
+			// Expect(deletedCount).To(Equal(int64(len(keys))))
+
+			// // Verify keys are actually deleted from their respective shards
+			// for key, shardAddr := range keyLocations {
+			// var targetNode *masterNode
+			// for i := range masterNodes {
+			// if masterNodes[i].addr == shardAddr {
+			// targetNode = &masterNodes[i]
+			// break
+			// }
+			// }
+			// Expect(targetNode).NotTo(BeNil())
+
+			// getResult := targetNode.client.Get(ctx, key)
+			// Expect(getResult.Err()).To(HaveOccurred())
+			// }

 			// EXISTS command aggregation across multiple keys
 			existsTestData := map[string]string{
