/*
 * QEMU VMWARE paravirtual devices - auxiliary code
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef VMWARE_UTILS_H
#define VMWARE_UTILS_H

#include "qemu/range.h"
#include "exec/cpu-common.h" /* cpu_physical_memory_*() and the ld/st _phys() helpers */

#ifndef VMW_SHPRN
#define VMW_SHPRN(fmt, ...) do {} while (0)
#endif

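/*
 * A device model can enable these debug printouts by defining VMW_SHPRN
 * before including this header. A minimal sketch (the exact macro is up
 * to the including device, this one is only illustrative):
 *
 *   #define VMW_SHPRN(fmt, ...) \
 *       fprintf(stderr, "vmware-shmem: " fmt "\n", ## __VA_ARGS__)
 *   #include "vmware_utils.h"
 */
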
/*
 * Shared memory access functions with byte-swap support.
 * Each function contains a debug printout for reverse-engineering needs.
 */
static inline void
vmw_shmem_read(hwaddr addr, void *buf, int len)
{
    VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf);
    cpu_physical_memory_read(addr, buf, len);
}

static inline void
vmw_shmem_write(hwaddr addr, void *buf, int len)
{
    VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d from %p", addr, len, buf);
    cpu_physical_memory_write(addr, buf, len);
}

static inline void
vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write)
{
    VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (buf %p), is write: %d",
              addr, len, buf, is_write);

    cpu_physical_memory_rw(addr, buf, len, is_write);
}

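/*
 * For bulk data (e.g. copying a packet out of a guest ring buffer) the
 * buffer-oriented helpers above apply; a hypothetical call, with rx_buf_pa
 * standing in for a guest physical address the device obtained elsewhere:
 *
 *   uint8_t pkt[1514];
 *   vmw_shmem_read(rx_buf_pa, pkt, sizeof(pkt));
 */
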
static inline void
vmw_shmem_set(hwaddr addr, uint8_t val, int len)
{
    int i;
    VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val);

    /* Fill guest memory one byte at a time */
    for (i = 0; i < len; i++) {
        cpu_physical_memory_write(addr + i, &val, 1);
    }
}

static inline uint32_t
vmw_shmem_ld8(hwaddr addr)
{
    uint8_t res = ldub_phys(addr);
    VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
    return res;
}

static inline void
vmw_shmem_st8(hwaddr addr, uint8_t value)
{
    VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
    stb_phys(addr, value);
}

static inline uint32_t
vmw_shmem_ld16(hwaddr addr)
{
    uint16_t res = lduw_le_phys(addr);
    VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
    return res;
}

static inline void
vmw_shmem_st16(hwaddr addr, uint16_t value)
{
    VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
    stw_le_phys(addr, value);
}

static inline uint32_t
vmw_shmem_ld32(hwaddr addr)
{
    uint32_t res = ldl_le_phys(addr);
    VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
    return res;
}

static inline void
vmw_shmem_st32(hwaddr addr, uint32_t value)
{
    VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
    stl_le_phys(addr, value);
}

static inline uint64_t
vmw_shmem_ld64(hwaddr addr)
{
    uint64_t res = ldq_le_phys(addr);
    VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
    return res;
}

static inline void
vmw_shmem_st64(hwaddr addr, uint64_t value)
{
    VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
    stq_le_phys(addr, value);
}

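/*
 * Typical usage sketch; the descriptor layout below is hypothetical, not
 * taken from any real device. The _le_ accessors handle byte-swapping of
 * guest little-endian fields on big-endian hosts:
 *
 *   guest layout: struct ring_desc { uint64_t buf_pa; uint32_t len; }
 *
 *   uint64_t buf_pa = vmw_shmem_ld64(desc_pa);
 *   uint32_t len    = vmw_shmem_ld32(desc_pa + 8);
 *   vmw_shmem_st32(desc_pa + 8, 0);   (write back a completed length)
 */
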
/* Macros for simplification of operations on array-style registers */

/*
 * Whether <addr> lies inside the array-style register bank defined by
 * <base>, the number of elements <cnt> and the element size <regsize>
 */
#define VMW_IS_MULTIREG_ADDR(addr, base, cnt, regsize)                 \
    range_covers_byte(base, (cnt) * (regsize), addr)

/*
 * Returns the index of the register at <addr> within the array-style
 * register bank defined by <base> and element size <regsize>
 */
#define VMW_MULTIREG_IDX_BY_ADDR(addr, base, regsize)                  \
    (((addr) - (base)) / (regsize))

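/*
 * Usage sketch for an MMIO read handler; the names here are illustrative
 * only, not taken from an actual device:
 *
 *   if (VMW_IS_MULTIREG_ADDR(addr, TXQ_BASE, NUM_TX_QUEUES, 4)) {
 *       int qidx = VMW_MULTIREG_IDX_BY_ADDR(addr, TXQ_BASE, 4);
 *       return s->txq_state[qidx];
 *   }
 */
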
#endif /* VMWARE_UTILS_H */